feat: add nat gateway and vpc peering support

This commit is contained in:
Pierre Mavro
2021-09-23 19:22:59 +02:00
committed by Pierre Mavro
parent e2f0a1ff39
commit d0bb17d9f6
14 changed files with 435 additions and 179 deletions

View File

@@ -1,5 +0,0 @@
resource "aws_key_pair" "qovery_ssh_key_{{ kubernetes_cluster_id }}" {
key_name = var.ec2_ssh_default_key.key_name
public_key = var.ec2_ssh_default_key.public_key
tags = local.tags_eks
}

View File

@@ -5,13 +5,20 @@ locals {
}
locals {
tags_eks = {
tags_common = {
ClusterId = var.kubernetes_cluster_id
ClusterName = var.kubernetes_cluster_name,
Region = var.region
creationDate = time_static.on_cluster_create.rfc3339
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %}
}
tags_eks = merge(
local.tags_common,
{
"Service" = "EKS"
}
)
}
resource "time_static" "on_cluster_create" {}
@@ -32,7 +39,16 @@ resource "aws_eks_cluster" "eks_cluster" {
vpc_config {
security_group_ids = [aws_security_group.eks_cluster.id]
subnet_ids = flatten([aws_subnet.eks_zone_a.*.id, aws_subnet.eks_zone_b.*.id,aws_subnet.eks_zone_c.*.id])
subnet_ids = flatten([
aws_subnet.eks_zone_a[*].id,
aws_subnet.eks_zone_b[*].id,
aws_subnet.eks_zone_c[*].id,
{% if vpc_qovery_network_mode == "WithNatGateways" %}
aws_subnet.eks_zone_a_public[*].id,
aws_subnet.eks_zone_b_public[*].id,
aws_subnet.eks_zone_c_public[*].id
{% endif %}
])
}
tags = local.tags_eks
@@ -41,6 +57,5 @@ resource "aws_eks_cluster" "eks_cluster" {
aws_iam_role_policy_attachment.eks_cluster_AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.eks_cluster_AmazonEKSServicePolicy,
aws_cloudwatch_log_group.eks_cloudwatch_log_group,
aws_key_pair.qovery_ssh_key_{{ kubernetes_cluster_id }}
]
}

View File

@@ -0,0 +1,42 @@
# Availability zones in the configured region; indexed ([0]/[1]/[2]) below to
# spread subnets across three zones.
data "aws_availability_zones" "available" {}
locals {
# Base tags for all VPC-level resources: common cluster tags plus the
# Kubernetes subnet-discovery tags ("kubernetes.io/cluster/..." = shared,
# "kubernetes.io/role/elb") used by EKS/ELB.
tags_eks_vpc = merge(
local.tags_common,
{
Name = "qovery-eks-workers",
"kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared",
"kubernetes.io/role/elb" = 1,
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %}
}
)
# NOTE(review): public/private tag sets merge local.tags_eks, NOT
# local.tags_eks_vpc, so subnets tagged with these lose the
# "kubernetes.io/..." discovery tags defined just above — confirm this is
# intentional before relying on ELB subnet auto-discovery.
tags_eks_vpc_public = merge(
local.tags_eks,
{
"Public" = "true"
}
)
tags_eks_vpc_private = merge(
local.tags_eks,
{
"Public" = "false"
}
)
}
# VPC hosting the whole EKS cluster; DNS hostnames enabled (required for
# worker nodes to resolve the EKS endpoint by name).
resource "aws_vpc" "eks" {
cidr_block = var.vpc_cidr_block
enable_dns_hostnames = true
tags = local.tags_eks_vpc
}
# Internet gateway giving the VPC (via the public route table) internet egress.
resource "aws_internet_gateway" "eks_cluster" {
vpc_id = aws_vpc.eks.id
tags = local.tags_eks_vpc
}

View File

@@ -0,0 +1,209 @@
{% if vpc_qovery_network_mode == "WithNatGateways" %}
# External IPs: one Elastic IP per availability zone, consumed by that zone's
# NAT gateway(s) below.
resource "aws_eip" "eip_zone_a" {
# "vpc = true" is the correct form for the AWS provider version pinned by this
# module (3.x); newer providers use domain = "vpc" instead.
vpc = true
tags = local.tags_eks_vpc
}
resource "aws_eip" "eip_zone_b" {
vpc = true
tags = local.tags_eks_vpc
}
resource "aws_eip" "eip_zone_c" {
vpc = true
tags = local.tags_eks_vpc
}
# Public subnets: one subnet per CIDR block in each zone's public list.
# Instances launched here receive public IPs; these subnets host the NAT
# gateways defined below.
resource "aws_subnet" "eks_zone_a_public" {
count = length(var.eks_subnets_zone_a_public)
// todo: provide a static list of zone names to avoid a possible outage if AWS adds a new zone (index-based lookup into the AZ data source is fragile)
availability_zone = data.aws_availability_zones.available.names[0]
cidr_block = var.eks_subnets_zone_a_public[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc_public
}
resource "aws_subnet" "eks_zone_b_public" {
count = length(var.eks_subnets_zone_b_public)
availability_zone = data.aws_availability_zones.available.names[1]
cidr_block = var.eks_subnets_zone_b_public[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc_public
}
resource "aws_subnet" "eks_zone_c_public" {
count = length(var.eks_subnets_zone_c_public)
availability_zone = data.aws_availability_zones.available.names[2]
cidr_block = var.eks_subnets_zone_c_public[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc_public
}
# Public NAT gateways: one per public subnet, placed inside that subnet.
# NOTE(review): every NAT gateway of a zone references the SAME zone EIP
# (allocation_id has no [count.index]). An EIP can back only one NAT gateway,
# so any zone with more than one public subnet will fail at apply time —
# confirm the public subnet lists are guaranteed to hold at most one entry
# per zone, or allocate one EIP per gateway.
resource "aws_nat_gateway" "eks_zone_a_public" {
count = length(var.eks_subnets_zone_a_public)
allocation_id = aws_eip.eip_zone_a.id
subnet_id = aws_subnet.eks_zone_a_public[count.index].id
tags = local.tags_eks_vpc_public
}
resource "aws_nat_gateway" "eks_zone_b_public" {
count = length(var.eks_subnets_zone_b_public)
allocation_id = aws_eip.eip_zone_b.id
subnet_id = aws_subnet.eks_zone_b_public[count.index].id
tags = local.tags_eks_vpc_public
}
resource "aws_nat_gateway" "eks_zone_c_public" {
count = length(var.eks_subnets_zone_c_public)
allocation_id = aws_eip.eip_zone_c.id
subnet_id = aws_subnet.eks_zone_c_public[count.index].id
tags = local.tags_eks_vpc_public
}
# Public routing table: a single table whose default route (0.0.0.0/0) points
# at the internet gateway; every public subnet is associated with it.
resource "aws_route_table" "eks_cluster" {
vpc_id = aws_vpc.eks.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.eks_cluster.id
}
tags = local.tags_eks_vpc_public
}
# Bind each public subnet (per zone) to the public routing table.
resource "aws_route_table_association" "eks_cluster_zone_a_public" {
count = length(var.eks_subnets_zone_a_public)
subnet_id = aws_subnet.eks_zone_a_public.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_b_public" {
count = length(var.eks_subnets_zone_b_public)
subnet_id = aws_subnet.eks_zone_b_public.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_c_public" {
count = length(var.eks_subnets_zone_c_public)
subnet_id = aws_subnet.eks_zone_c_public.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
# Private subnets: one subnet per CIDR block in each zone's private list.
# No public IPs are assigned on launch; these subnets hold the EKS worker
# nodes and egress through the zone NAT gateways.
resource "aws_subnet" "eks_zone_a" {
count = length(var.eks_subnets_zone_a_private)
availability_zone = data.aws_availability_zones.available.names[0]
cidr_block = var.eks_subnets_zone_a_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = false
tags = local.tags_eks_vpc_private
}
resource "aws_subnet" "eks_zone_b" {
count = length(var.eks_subnets_zone_b_private)
availability_zone = data.aws_availability_zones.available.names[1]
cidr_block = var.eks_subnets_zone_b_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = false
tags = local.tags_eks_vpc_private
}
resource "aws_subnet" "eks_zone_c" {
count = length(var.eks_subnets_zone_c_private)
availability_zone = data.aws_availability_zones.available.names[2]
cidr_block = var.eks_subnets_zone_c_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = false
tags = local.tags_eks_vpc_private
}
# Private routing tables: one per NAT gateway, so each private subnet's
# default route (0.0.0.0/0) egresses through its own zone's NAT gateway.
#
# Fix: inside a route block a NAT gateway must be referenced through
# `nat_gateway_id`; `gateway_id` only accepts internet/virtual private
# gateway IDs and fails at apply time when given a nat-xxxx ID.
resource "aws_route_table" "eks_cluster_zone_a_private" {
  count  = length(aws_nat_gateway.eks_zone_a_public)
  vpc_id = aws_vpc.eks.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.eks_zone_a_public[count.index].id
  }

  tags = local.tags_eks_vpc_private
}
resource "aws_route_table" "eks_cluster_zone_b_private" {
  count  = length(aws_nat_gateway.eks_zone_b_public)
  vpc_id = aws_vpc.eks.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.eks_zone_b_public[count.index].id
  }

  tags = local.tags_eks_vpc_private
}
resource "aws_route_table" "eks_cluster_zone_c_private" {
  count  = length(aws_nat_gateway.eks_zone_c_public)
  vpc_id = aws_vpc.eks.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.eks_zone_c_public[count.index].id
  }

  tags = local.tags_eks_vpc_private
}
# Bind each private subnet to its zone's private routing table.
# NOTE(review): count here follows the *private* subnet list, while the route
# tables above are counted from the NAT gateways (i.e. the *public* subnet
# list). If the two lists differ in length, the [count.index] lookup on the
# route table goes out of range — confirm both lists are always the same size.
resource "aws_route_table_association" "eks_cluster_zone_a" {
count = length(var.eks_subnets_zone_a_private)
subnet_id = aws_subnet.eks_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster_zone_a_private[count.index].id
}
resource "aws_route_table_association" "eks_cluster_zone_b" {
count = length(var.eks_subnets_zone_b_private)
subnet_id = aws_subnet.eks_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster_zone_b_private[count.index].id
}
resource "aws_route_table_association" "eks_cluster_zone_c" {
count = length(var.eks_subnets_zone_c_private)
subnet_id = aws_subnet.eks_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster_zone_c_private[count.index].id
}
{% endif %}

View File

@@ -1,29 +1,10 @@
data "aws_availability_zones" "available" {}
locals {
tags_eks_vpc = merge(
local.tags_eks,
{
Name = "qovery-eks-workers",
"kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared",
"kubernetes.io/role/elb" = 1,
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %}
}
)
}
resource "aws_vpc" "eks" {
cidr_block = var.vpc_cidr_block
enable_dns_hostnames = true
tags = local.tags_eks_vpc
}
{% if vpc_qovery_network_mode == "WithoutNatGateways" %}
# Public subnets
resource "aws_subnet" "eks_zone_a" {
count = length(var.eks_subnets_zone_a)
count = length(var.eks_subnets_zone_a_private)
availability_zone = data.aws_availability_zones.available.names[0]
cidr_block = var.eks_subnets_zone_a[count.index]
cidr_block = var.eks_subnets_zone_a_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
@@ -31,10 +12,10 @@ resource "aws_subnet" "eks_zone_a" {
}
resource "aws_subnet" "eks_zone_b" {
count = length(var.eks_subnets_zone_b)
count = length(var.eks_subnets_zone_b_private)
availability_zone = data.aws_availability_zones.available.names[1]
cidr_block = var.eks_subnets_zone_b[count.index]
cidr_block = var.eks_subnets_zone_b_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
@@ -42,22 +23,16 @@ resource "aws_subnet" "eks_zone_b" {
}
resource "aws_subnet" "eks_zone_c" {
count = length(var.eks_subnets_zone_c)
count = length(var.eks_subnets_zone_c_private)
availability_zone = data.aws_availability_zones.available.names[2]
cidr_block = var.eks_subnets_zone_c[count.index]
cidr_block = var.eks_subnets_zone_c_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc
}
resource "aws_internet_gateway" "eks_cluster" {
vpc_id = aws_vpc.eks.id
tags = local.tags_eks_vpc
}
resource "aws_route_table" "eks_cluster" {
vpc_id = aws_vpc.eks.id
@@ -70,22 +45,23 @@ resource "aws_route_table" "eks_cluster" {
}
resource "aws_route_table_association" "eks_cluster_zone_a" {
count = length(var.eks_subnets_zone_a)
count = length(var.eks_subnets_zone_a_private)
subnet_id = aws_subnet.eks_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_b" {
count = length(var.eks_subnets_zone_b)
count = length(var.eks_subnets_zone_b_private)
subnet_id = aws_subnet.eks_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_c" {
count = length(var.eks_subnets_zone_c)
count = length(var.eks_subnets_zone_c_private)
subnet_id = aws_subnet.eks_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
{% endif %}

View File

@@ -1,15 +1,19 @@
{% for eks_worker_node in eks_worker_nodes %}
resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" {
cluster_name = aws_eks_cluster.eks_cluster.name
version = var.eks_k8s_versions.workers
node_group_name = "qovery-${var.kubernetes_cluster_id}-{{ loop.index }}"
node_role_arn = aws_iam_role.eks_workers.arn
subnet_ids = flatten([aws_subnet.eks_zone_a[*].id, aws_subnet.eks_zone_b[*].id, aws_subnet.eks_zone_c[*].id])
cluster_name = aws_eks_cluster.eks_cluster.name
version = var.eks_k8s_versions.workers
node_role_arn = aws_iam_role.eks_workers.arn
node_group_name_prefix = "qovery-"
subnet_ids = flatten([aws_subnet.eks_zone_a[*].id, aws_subnet.eks_zone_b[*].id, aws_subnet.eks_zone_c[*].id])
instance_types = ["{{ eks_worker_node.instance_type }}"]
ami_type = "AL2_x86_64"
tags = local.tags_eks
tags = merge(
local.tags_eks,
{
"QoveryNodeGroupId" = "${var.kubernetes_cluster_id}-{{ loop.index }}"
}
)
scaling_config {
desired_size = "{{ eks_worker_node.desired_size }}"
@@ -17,9 +21,14 @@ resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" {
min_size = "{{ eks_worker_node.min_size }}"
}
remote_access {
ec2_ssh_key = var.ec2_ssh_default_key.key_name
source_security_group_ids = [aws_security_group.eks_cluster_workers.id]
lifecycle {
// don't update the desired size and let the cluster-autoscaler do the job
ignore_changes = [scaling_config[0].desired_size]
create_before_destroy = true
}
update_config {
max_unavailable_percentage = 10
}
timeouts {
@@ -28,11 +37,6 @@ resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" {
update = "60m"
}
lifecycle {
// don't update the desired size and let the cluster-autoscaler do the job
ignore_changes = [scaling_config[0].desired_size]
}
# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [

View File

@@ -43,16 +43,6 @@ resource "aws_security_group_rule" "node_ingress_cluster" {
type = "ingress"
}
resource "aws_security_group_rule" "ssh_access_to_workers" {
description = "Allow SSH on worker nodes"
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster_workers.id
source_security_group_id = aws_security_group.eks_cluster.id
to_port = 22
type = "ingress"
}
############################################
# Worker Node Access to EKS Master Cluster #
############################################

View File

@@ -52,24 +52,44 @@ variable "vpc_cidr_block" {
# Kubernetes
variable "eks_subnets_zone_a" {
description = "EKS subnets Zone A"
default = {{ eks_zone_a_subnet_blocks }}
variable "eks_subnets_zone_a_private" {
description = "EKS private subnets Zone A"
default = {{ eks_zone_a_subnet_blocks_private }}
type = list(string)
}
variable "eks_subnets_zone_b" {
description = "EKS subnets Zone B"
default = {{ eks_zone_b_subnet_blocks }}
variable "eks_subnets_zone_b_private" {
description = "EKS private subnets Zone B"
default = {{ eks_zone_b_subnet_blocks_private }}
type = list(string)
}
variable "eks_subnets_zone_c" {
description = "EKS subnets Zone C"
default = {{ eks_zone_c_subnet_blocks }}
variable "eks_subnets_zone_c_private" {
description = "EKS private subnets Zone C"
default = {{ eks_zone_c_subnet_blocks_private }}
type = list(string)
}
{% if vpc_qovery_network_mode == "WithNatGateways" %}
variable "eks_subnets_zone_a_public" {
description = "EKS public subnets Zone A"
default = {{ eks_zone_a_subnet_blocks_public }}
type = list(string)
}
variable "eks_subnets_zone_b_public" {
description = "EKS public subnets Zone B"
default = {{ eks_zone_b_subnet_blocks_public }}
type = list(string)
}
variable "eks_subnets_zone_c_public" {
description = "EKS public subnets Zone C"
default = {{ eks_zone_c_subnet_blocks_public }}
type = list(string)
}
{% endif %}
variable "eks_cidr_subnet" {
description = "EKS CIDR (x.x.x.x/CIDR)"
default = {{ eks_cidr_subnet }}
@@ -98,7 +118,7 @@ variable "kubernetes_cluster_name" {
}
variable "eks_access_cidr_blocks" {
description = "Kubernetes cluster name"
description = "Kubernetes CIDR Block"
default = {{ eks_access_cidr_blocks }}
type = list(string)
}
@@ -117,17 +137,6 @@ variable "s3_bucket_kubeconfig" {
type = string
}
# EC2 SSH default SSH key
variable "ec2_ssh_default_key" {
description = "Default SSH key"
default = {
"key_name" = "qovery-{{ kubernetes_cluster_id }}"
"public_key" = "{{ qovery_ssh_key }}"
}
type = map(string)
}
# Engine info
variable "qovery_engine_info" {

View File

@@ -2,7 +2,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.36.0"
version = "~> 3.59.0"
}
external = {
source = "hashicorp/external"

View File

@@ -17,4 +17,4 @@ spec:
- port: {{ .Values.metrics.portName }}
interval: 30s
scrapeTimeout: 5s
{{- end }}
{{- end }}

View File

@@ -1,4 +1,4 @@
use crate::cloud_provider::aws::kubernetes::Options;
use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use crate::cloud_provider::helm::{
get_chart_namespace, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated, CommonChart, CoreDNSConfigChart,
HelmAction, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart,
@@ -35,6 +35,7 @@ pub struct ChartsConfigPrerequisites {
pub test_cluster: bool,
pub aws_access_key_id: String,
pub aws_secret_access_key: String,
pub vpc_qovery_network_mode: VpcQoveryNetworkMode,
pub ff_log_history_enabled: bool,
pub ff_metrics_history_enabled: bool,
pub managed_dns_name: String,

View File

@@ -41,13 +41,27 @@ use crate::models::{
use crate::object_storage::s3::S3;
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
use core::fmt;
use std::path::PathBuf;
pub mod helm_charts;
pub mod node;
pub mod roles;
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
// https://docs.aws.amazon.com/eks/latest/userguide/external-snat.html
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum VpcQoveryNetworkMode {
    WithNatGateways,
    WithoutNatGateways,
}

/// Renders the variant name verbatim ("WithNatGateways" /
/// "WithoutNatGateways"), matching what the `Debug` derive prints; the
/// string is injected into Terraform templates as `vpc_qovery_network_mode`.
impl fmt::Display for VpcQoveryNetworkMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            VpcQoveryNetworkMode::WithNatGateways => "WithNatGateways",
            VpcQoveryNetworkMode::WithoutNatGateways => "WithoutNatGateways",
        };
        f.write_str(name)
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Options {
// AWS related
pub eks_zone_a_subnet_blocks: Vec<String>,
@@ -65,6 +79,7 @@ pub struct Options {
pub elasticsearch_zone_a_subnet_blocks: Vec<String>,
pub elasticsearch_zone_b_subnet_blocks: Vec<String>,
pub elasticsearch_zone_c_subnet_blocks: Vec<String>,
pub vpc_qovery_network_mode: VpcQoveryNetworkMode,
pub vpc_cidr_block: String,
pub eks_cidr_subnet: String,
pub eks_access_cidr_blocks: Vec<String>,
@@ -164,13 +179,59 @@ impl<'a> EKS<'a> {
.to_string()
}
/// Splits a zone's subnet list evenly into private and public halves.
///
/// With NAT gateways enabled, the first half of each zone's subnet blocks
/// stays private and the second half is promoted to public, so the list
/// length must be even. Returns the size of each half
/// (`subnet_block.len() / 2`), or an internal `EngineError` naming the
/// offending zone when the count is odd.
fn check_odd_subnets(&self, zone_name: &str, subnet_block: &[String]) -> Result<usize, EngineError> {
    // `&[String]` instead of `&Vec<String>`: existing callers passing
    // `&Vec<String>` still compile via deref coercion, and the method no
    // longer over-constrains its input.
    if subnet_block.len() % 2 != 0 {
        return Err(EngineError {
            cause: EngineErrorCause::Internal,
            scope: EngineErrorScope::Engine,
            execution_id: self.context.execution_id().to_string(),
            message: Some(format!(
                "the number of subnets for zone '{}' should be an even number, not an odd!",
                zone_name
            )),
        });
    }
    // `len()` is already a usize; the original `as usize` cast was redundant.
    Ok(subnet_block.len() / 2)
}
fn tera_context(&self) -> Result<TeraContext, EngineError> {
let mut context = TeraContext::new();
let format_ips =
|ips: &Vec<String>| -> Vec<String> { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::<Vec<_>>() };
let eks_zone_a_subnet_blocks = format_ips(&self.options.eks_zone_a_subnet_blocks);
let eks_zone_b_subnet_blocks = format_ips(&self.options.eks_zone_b_subnet_blocks);
let eks_zone_c_subnet_blocks = format_ips(&self.options.eks_zone_c_subnet_blocks);
let mut eks_zone_a_subnet_blocks_private = format_ips(&self.options.eks_zone_a_subnet_blocks);
let mut eks_zone_b_subnet_blocks_private = format_ips(&self.options.eks_zone_b_subnet_blocks);
let mut eks_zone_c_subnet_blocks_private = format_ips(&self.options.eks_zone_c_subnet_blocks);
match self.options.vpc_qovery_network_mode {
VpcQoveryNetworkMode::WithNatGateways => {
let max_subnet_zone_a = self.check_odd_subnets("a", &eks_zone_a_subnet_blocks_private)?;
let max_subnet_zone_b = self.check_odd_subnets("b", &eks_zone_b_subnet_blocks_private)?;
let max_subnet_zone_c = self.check_odd_subnets("c", &eks_zone_c_subnet_blocks_private)?;
let eks_zone_a_subnet_blocks_public: Vec<String> =
eks_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect();
let eks_zone_b_subnet_blocks_public: Vec<String> =
eks_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect();
let eks_zone_c_subnet_blocks_public: Vec<String> =
eks_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect();
context.insert("eks_zone_a_subnet_blocks_public", &eks_zone_a_subnet_blocks_public);
context.insert("eks_zone_b_subnet_blocks_public", &eks_zone_b_subnet_blocks_public);
context.insert("eks_zone_c_subnet_blocks_public", &eks_zone_c_subnet_blocks_public);
}
VpcQoveryNetworkMode::WithoutNatGateways => {}
};
context.insert(
"vpc_qovery_network_mode",
&self.options.vpc_qovery_network_mode.to_string(),
);
let rds_zone_a_subnet_blocks = format_ips(&self.options.rds_zone_a_subnet_blocks);
let rds_zone_b_subnet_blocks = format_ips(&self.options.rds_zone_b_subnet_blocks);
let rds_zone_c_subnet_blocks = format_ips(&self.options.rds_zone_c_subnet_blocks);
@@ -219,7 +280,6 @@ impl<'a> EKS<'a> {
let managed_dns_domains_terraform_format = terraform_list_format(vec![self.dns_provider.domain().to_string()]);
let managed_dns_resolvers_terraform_format = self.managed_dns_resolvers_terraform_format();
let mut context = TeraContext::new();
// Qovery
context.insert("organization_id", self.cloud_provider.organization_id());
context.insert("qovery_api_url", &qovery_api_url);
@@ -343,9 +403,9 @@ impl<'a> EKS<'a> {
context.insert("kubernetes_cluster_id", self.id());
context.insert("eks_region_cluster_id", region_cluster_id.as_str());
context.insert("eks_worker_nodes", &worker_nodes);
context.insert("eks_zone_a_subnet_blocks", &eks_zone_a_subnet_blocks);
context.insert("eks_zone_b_subnet_blocks", &eks_zone_b_subnet_blocks);
context.insert("eks_zone_c_subnet_blocks", &eks_zone_c_subnet_blocks);
context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private);
context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private);
context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private);
context.insert("eks_masters_version", &self.version());
context.insert("eks_workers_version", &self.version());
context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group);
@@ -845,6 +905,7 @@ impl<'a> Kubernetes for EKS<'a> {
test_cluster: self.context.is_test_cluster(),
aws_access_key_id: self.cloud_provider.access_key_id.to_string(),
aws_secret_access_key: self.cloud_provider.secret_access_key.to_string(),
vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(),
ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory),
ff_metrics_history_enabled: self.context.is_feature_enabled(&Features::MetricsHistory),
managed_dns_name: self.dns_provider.domain().to_string(),

View File

@@ -3,7 +3,7 @@ extern crate serde_derive;
use tracing::error;
use qovery_engine::cloud_provider::aws::kubernetes::node::Node;
use qovery_engine::cloud_provider::aws::kubernetes::{Options, EKS};
use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode, EKS};
use qovery_engine::cloud_provider::aws::AWS;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::docker_hub::DockerHub;
@@ -16,7 +16,7 @@ use crate::cloudflare::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
pub const AWS_QOVERY_ORGANIZATION_ID: &str = "u8nb94c7fwxzr2jt";
pub const AWS_REGION_FOR_S3: &str = "us-east-2";
pub const AWS_REGION_FOR_S3: &str = "eu-west-3";
pub const AWS_KUBERNETES_VERSION: &str = "1.18";
pub const AWS_KUBE_TEST_CLUSTER_ID: &str = "dmubm9agk7sr8a8r";
pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t2.micro";
@@ -86,75 +86,9 @@ pub fn cloud_provider_aws(context: &Context) -> AWS {
pub fn eks_options(secrets: FuncTestsSecrets) -> Options {
Options {
eks_zone_a_subnet_blocks: vec![
"10.0.0.0/23".to_string(),
"10.0.2.0/23".to_string(),
"10.0.4.0/23".to_string(),
"10.0.6.0/23".to_string(),
"10.0.8.0/23".to_string(),
"10.0.10.0/23".to_string(),
"10.0.12.0/23".to_string(),
"10.0.14.0/23".to_string(),
"10.0.16.0/23".to_string(),
"10.0.18.0/23".to_string(),
"10.0.20.0/23".to_string(),
"10.0.22.0/23".to_string(),
"10.0.24.0/23".to_string(),
"10.0.26.0/23".to_string(),
"10.0.28.0/23".to_string(),
"10.0.30.0/23".to_string(),
"10.0.32.0/23".to_string(),
"10.0.34.0/23".to_string(),
"10.0.36.0/23".to_string(),
"10.0.38.0/23".to_string(),
"10.0.40.0/23".to_string(),
],
eks_zone_b_subnet_blocks: vec![
"10.0.42.0/23".to_string(),
"10.0.44.0/23".to_string(),
"10.0.46.0/23".to_string(),
"10.0.48.0/23".to_string(),
"10.0.50.0/23".to_string(),
"10.0.52.0/23".to_string(),
"10.0.54.0/23".to_string(),
"10.0.56.0/23".to_string(),
"10.0.58.0/23".to_string(),
"10.0.60.0/23".to_string(),
"10.0.62.0/23".to_string(),
"10.0.64.0/23".to_string(),
"10.0.66.0/23".to_string(),
"10.0.68.0/23".to_string(),
"10.0.70.0/23".to_string(),
"10.0.72.0/23".to_string(),
"10.0.74.0/23".to_string(),
"10.0.78.0/23".to_string(),
"10.0.80.0/23".to_string(),
"10.0.82.0/23".to_string(),
"10.0.84.0/23".to_string(),
],
eks_zone_c_subnet_blocks: vec![
"10.0.86.0/23".to_string(),
"10.0.88.0/23".to_string(),
"10.0.90.0/23".to_string(),
"10.0.92.0/23".to_string(),
"10.0.94.0/23".to_string(),
"10.0.96.0/23".to_string(),
"10.0.98.0/23".to_string(),
"10.0.100.0/23".to_string(),
"10.0.102.0/23".to_string(),
"10.0.104.0/23".to_string(),
"10.0.106.0/23".to_string(),
"10.0.108.0/23".to_string(),
"10.0.110.0/23".to_string(),
"10.0.112.0/23".to_string(),
"10.0.114.0/23".to_string(),
"10.0.116.0/23".to_string(),
"10.0.118.0/23".to_string(),
"10.0.120.0/23".to_string(),
"10.0.122.0/23".to_string(),
"10.0.124.0/23".to_string(),
"10.0.126.0/23".to_string(),
],
eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()],
eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()],
eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()],
rds_zone_a_subnet_blocks: vec![
"10.0.214.0/23".to_string(),
"10.0.216.0/23".to_string(),
@@ -200,8 +134,9 @@ pub fn eks_options(secrets: FuncTestsSecrets) -> Options {
elasticsearch_zone_a_subnet_blocks: vec!["10.0.184.0/23".to_string(), "10.0.186.0/23".to_string()],
elasticsearch_zone_b_subnet_blocks: vec!["10.0.188.0/23".to_string(), "10.0.190.0/23".to_string()],
elasticsearch_zone_c_subnet_blocks: vec!["10.0.192.0/23".to_string(), "10.0.194.0/23".to_string()],
vpc_qovery_network_mode: VpcQoveryNetworkMode::WithoutNatGateways,
vpc_cidr_block: "10.0.0.0/16".to_string(),
eks_cidr_subnet: "23".to_string(),
eks_cidr_subnet: "20".to_string(),
eks_access_cidr_blocks: secrets
.EKS_ACCESS_CIDR_BLOCKS
.unwrap()

View File

@@ -6,7 +6,8 @@ use ::function_name::named;
use tracing::{span, Level};
use self::test_utilities::aws::eks_options;
use qovery_engine::cloud_provider::aws::kubernetes::EKS;
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways};
use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS};
use qovery_engine::transaction::TransactionResult;
#[allow(dead_code)]
@@ -90,7 +91,13 @@ fn create_upgrade_and_destroy_eks_cluster(
})
}
fn create_and_destroy_eks_cluster(region: &str, secrets: FuncTestsSecrets, test_infra_pause: bool, test_name: &str) {
fn create_and_destroy_eks_cluster(
region: &str,
secrets: FuncTestsSecrets,
test_infra_pause: bool,
vpc_network_mode: VpcQoveryNetworkMode,
test_name: &str,
) {
engine_run_test(|| {
init();
@@ -98,12 +105,15 @@ fn create_and_destroy_eks_cluster(region: &str, secrets: FuncTestsSecrets, test_
let _enter = span.enter();
let context = context();
let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let aws = test_utilities::aws::cloud_provider_aws(&context);
let nodes = test_utilities::aws::aws_kubernetes_nodes();
let mut eks_options = eks_options(secrets);
eks_options.vpc_qovery_network_mode = vpc_network_mode;
let cloudflare = dns_provider_cloudflare(&context);
@@ -115,7 +125,7 @@ fn create_and_destroy_eks_cluster(region: &str, secrets: FuncTestsSecrets, test_
region,
&aws,
&cloudflare,
eks_options(secrets),
eks_options,
nodes,
);
@@ -173,10 +183,19 @@ fn create_and_destroy_eks_cluster(region: &str, secrets: FuncTestsSecrets, test_
#[cfg(feature = "test-aws-infra")]
#[named]
#[test]
fn create_and_destroy_eks_cluster_in_eu_west_3() {
fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() {
let region = "eu-west-3";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, false, function_name!());
create_and_destroy_eks_cluster(&region, secrets, false, WithoutNatGateways, function_name!());
}
#[cfg(feature = "test-aws-infra")]
#[named]
#[test]
fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() {
let region = "eu-west-3";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, false, WithNatGateways, function_name!());
}
#[cfg(feature = "test-aws-infra")]
@@ -185,7 +204,7 @@ fn create_and_destroy_eks_cluster_in_eu_west_3() {
fn create_and_destroy_eks_cluster_in_us_east_2() {
let region = "us-east-2";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, true, function_name!());
create_and_destroy_eks_cluster(&region, secrets, true, WithoutNatGateways, function_name!());
}
// only enable this test manually when we want to perform and validate upgrade process