Exoscale
Cloudfleet supports adding Exoscale instances as nodes in your CFKE cluster. The most streamlined and scalable approach is to use the Cloudfleet Terraform provider’s cloud-init integration. This method allows you to create Exoscale instance pools with Terraform while using the cloud-init configuration generated by the Cloudfleet Terraform provider to automatically register the instances with your CFKE cluster.
Prerequisites
- You have the Terraform CLI installed on your workstation.
- You configured the Cloudfleet CLI by following the instructions here. Alternatively, you can use token authentication by following the instructions here.
- You have an Exoscale account with API credentials. You can create API credentials from the Exoscale console.
- You have a CFKE cluster running. The example below assumes that you have a cluster with the ID CLUSTER_ID, but you can also create the cluster with Terraform as shown in the Terraform introduction.
Adding Exoscale instances to your CFKE cluster
Use the following Terraform configuration to create Exoscale instance pools and integrate them with your CFKE cluster. Replace the cfke_cluster_id, exoscale_api_key, and exoscale_api_secret variables with your actual values.
// Input variables for this configuration.

// ID of the target CFKE cluster. Replace the placeholder default with your
// actual cluster ID, or supply it via -var / a .tfvars file.
variable "cfke_cluster_id" {
type = string
default = "CFKE Cluster ID"
}
// Exoscale IAM API key used to authenticate the Exoscale provider.
variable "exoscale_api_key" {
type = string
}
// Exoscale IAM API secret. Treat as sensitive; keep out of version control.
variable "exoscale_api_secret" {
type = string
}
// Exoscale zone in which all resources are created (default: Geneva, ch-gva-2).
variable "zone" {
type = string
default = "ch-gva-2"
}
// Provider requirements: the Cloudfleet provider (served from Cloudfleet's own
// registry), the Kubernetes provider, and the Exoscale provider.
terraform {
required_providers {
cloudfleet = {
source = "terraform.cloudfleet.ai/cloudfleet/cloudfleet"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.0"
}
exoscale = {
source = "exoscale/exoscale"
}
}
}
// Authenticate the Exoscale provider with the API credentials supplied above.
provider "exoscale" {
key = var.exoscale_api_key
secret = var.exoscale_api_secret
}
// Look up the existing CFKE cluster by ID. Its endpoint and CA certificate
// are also used later to configure the Kubernetes provider.
data "cloudfleet_cfke_cluster" "cluster" {
id = var.cfke_cluster_id
}
// Generate the cloud-init configuration that registers new Exoscale instances
// with the CFKE cluster on first boot. The rendered output is passed to the
// instance pool below as user data.
resource "cloudfleet_cfke_node_join_information" "exoscale" {
cluster_id = data.cloudfleet_cfke_cluster.cluster.id
region = var.zone // the Exoscale zone is used for both region and zone here
zone = var.zone
node_labels = {
"cfke.io/provider" = "exoscale" // label targeted by the deployment's node affinity rule
}
}
// Exoscale resources
// Create a private network for the CFKE nodes
// Private network for node-to-node communication, with a managed address
// range of 10.0.0.50 - 10.0.0.250 (/24 netmask).
resource "exoscale_private_network" "cfke_network" {
zone = var.zone
name = "cfke-private-network"
description = "CFKE Private Network"
netmask = "255.255.255.0"
start_ip = "10.0.0.50"
end_ip = "10.0.0.250"
}
// Create a security group for the CFKE nodes
// Security group attached to all CFKE worker nodes. Ingress rules are added
// separately (see the NodePort rule in the load-balancer section).
resource "exoscale_security_group" "cfke_security_group" {
name = "cfke-security-group"
description = "CFKE Security Group"
}
// Ubuntu 24.04 LTS template used as the base image for the worker nodes.
data "exoscale_template" "cfke_template" {
zone = var.zone
name = "Linux Ubuntu 24.04 LTS 64-bit"
}
// Create an instance pool for CFKE worker nodes
// Instance pool of CFKE worker nodes. The cloud-init user data generated by
// the Cloudfleet provider joins each instance to the cluster automatically.
resource "exoscale_instance_pool" "cfke" {
zone = var.zone
name = "cfke-worker"
instance_prefix = "cfke-worker"
template_id = data.exoscale_template.cfke_template.id
instance_type = "standard.medium"
disk_size = 10 // root disk size in GB; increase for image-heavy workloads
size = 3 // number of worker nodes in the pool
ipv6 = true
security_group_ids = [exoscale_security_group.cfke_security_group.id]
network_ids = [exoscale_private_network.cfke_network.id]
user_data = cloudfleet_cfke_node_join_information.exoscale.rendered // Use the generated cloud-init configuration
}
After creating the Terraform configuration, run these commands to provision the Exoscale instances and integrate them with your CFKE cluster:
terraform init
terraform apply
After deployment completes, verify the nodes have joined your cluster:
kubectl get nodes
Adding a load balancer
To expose workloads to the internet, create a Kubernetes service of type NodePort and configure an Exoscale Network Load Balancer to forward traffic to the nodes on that port. The example below demonstrates a simple Nginx deployment exposed via port 31001 with a corresponding Exoscale Network Load Balancer configuration.
// NodePort on which the example service is exposed on every node; referenced
// by the security group rule, the NLB service, and the Kubernetes service.
locals {
node_port = 31001
}
// Security group rule to allow traffic to the NodePort
// Allow inbound TCP traffic to the NodePort from anywhere. For production,
// restrict the CIDR to the load balancer or other trusted ranges.
resource "exoscale_security_group_rule" "nodeport" {
security_group_id = exoscale_security_group.cfke_security_group.id
description = "NodePort range"
type = "INGRESS"
protocol = "TCP"
start_port = local.node_port
end_port = local.node_port
cidr = "0.0.0.0/0"
}
// Create Network Load Balancer
// Network Load Balancer fronting the worker nodes; its public IP is exported
// by the load_balancer_ip output at the end of this configuration.
resource "exoscale_nlb" "nginx" {
zone = var.zone
name = "nginx"
}
// NLB service: forwards public port 80 to the NodePort on every instance in
// the pool, with an HTTP health check against "/".
resource "exoscale_nlb_service" "nginx" {
nlb_id = exoscale_nlb.nginx.id
zone = exoscale_nlb.nginx.zone
name = "nginx"
instance_pool_id = exoscale_instance_pool.cfke.id
protocol = "tcp"
port = 80 // public-facing port on the NLB
target_port = local.node_port // NodePort on the instances
strategy = "round-robin"
healthcheck {
mode = "http"
port = local.node_port
uri = "/"
interval = 5 // seconds between health checks
timeout = 3 // seconds before a check is considered failed
retries = 1
}
}
// An example Kubernetes deployment using a Load Balancer.
// Configure the Kubernetes provider against the CFKE cluster, authenticating
// with the current Cloudfleet session's access token.
data "cloudfleet_client_config" "me" {}
provider "kubernetes" {
host = data.cloudfleet_cfke_cluster.cluster.endpoint
cluster_ca_certificate = data.cloudfleet_cfke_cluster.cluster.certificate_authority
token = data.cloudfleet_client_config.me.access_token
}
// Namespace for the example workload; created only after the cluster lookup
// succeeds.
resource "kubernetes_namespace" "test" {
depends_on = [
data.cloudfleet_cfke_cluster.cluster
]
metadata {
name = "exoscale"
}
}
// Example Nginx deployment, pinned to Exoscale nodes via node affinity on the
// cfke.io/provider label set in the node join information above.
resource "kubernetes_deployment" "app" {
wait_for_rollout = false // don't block terraform apply on rollout completion
metadata {
name = "nginx"
namespace = kubernetes_namespace.test.id
labels = {
app = "nginx"
}
}
spec {
replicas = 2
selector {
match_labels = {
app = "nginx"
}
}
template {
metadata {
labels = {
app = "nginx"
}
}
spec {
// Only schedule pods on nodes labeled cfke.io/provider=exoscale.
affinity {
node_affinity {
required_during_scheduling_ignored_during_execution {
node_selector_term {
match_expressions {
key = "cfke.io/provider"
operator = "In"
values = [
"exoscale"
]
}
}
}
}
}
container {
image = "nginx:latest"
name = "nginx"
port {
name = "http"
container_port = 80
}
}
}
}
}
}
// NodePort service exposing the Nginx pods on local.node_port of every node.
// With externalTrafficPolicy=Local, only nodes running a local pod answer on
// the NodePort, so the NLB health check naturally routes around empty nodes.
resource "kubernetes_service" "app" {
depends_on = [
data.cloudfleet_cfke_cluster.cluster
]
metadata {
name = "nginx"
namespace = kubernetes_namespace.test.id
labels = {
app = "nginx"
}
}
spec {
selector = {
app = "nginx"
}
external_traffic_policy = "Local"
type = "NodePort"
port {
name = "http"
port = 80 // in-cluster service port
target_port = 80 // container port
node_port = local.node_port // port opened on every node, targeted by the NLB
}
}
}
// Public IP address of the Network Load Balancer serving the example workload.
output "load_balancer_ip" {
value = exoscale_nlb.nginx.ip_address
}
For production deployments, consider installing an Ingress controller instead of this basic NodePort example.
Security configuration
Configure the Exoscale security group to secure your cluster with these essential rules:
- Port 31001 (TCP): Allow ingress traffic for the NodePort service. The example above allows traffic from all sources (0.0.0.0/0), but for production environments, consider restricting access to specific IP ranges or the load balancer IP addresses only.
This configuration ensures that external traffic flows through the Network Load Balancer, which provides centralized access control and public exposure management. The private network enables secure communication between nodes within the same instance pool.
← Scaleway