feat: add edge aws struct

This commit is contained in:
Pierre Mavro
2022-04-20 18:14:58 +02:00
committed by Benjamin Chastanier
parent 0a5a7eba1f
commit bd95f272c6
21 changed files with 1227 additions and 4 deletions

View File

@@ -0,0 +1,10 @@
terraform {
backend "s3" {
access_key = "{{ aws_access_key_tfstates_account }}"
secret_key = "{{ aws_secret_key_tfstates_account }}"
bucket = "{{ aws_terraform_backend_bucket }}"
key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate"
dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}"
region = "{{ aws_region_tfstates_account }}"
}
}

View File

@@ -0,0 +1,81 @@
locals {
tags_documentdb = merge(
aws_eks_cluster.eks_cluster.tags,
{
"Service" = "DocumentDB"
}
)
}
# Network
resource "aws_subnet" "documentdb_zone_a" {
count = length(var.documentdb_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.documentdb_subnets_zone_a[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_documentdb
}
resource "aws_subnet" "documentdb_zone_b" {
count = length(var.documentdb_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.documentdb_subnets_zone_b[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_documentdb
}
resource "aws_subnet" "documentdb_zone_c" {
count = length(var.documentdb_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.documentdb_subnets_zone_c[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_documentdb
}
# Attach every DocumentDB subnet to the shared EKS cluster route table,
# one association per subnet in each availability zone.
resource "aws_route_table_association" "documentdb_cluster_zone_a" {
  count          = length(var.documentdb_subnets_zone_a)
  subnet_id      = aws_subnet.documentdb_zone_a[count.index].id
  route_table_id = aws_route_table.eks_cluster.id
}

resource "aws_route_table_association" "documentdb_cluster_zone_b" {
  count          = length(var.documentdb_subnets_zone_b)
  subnet_id      = aws_subnet.documentdb_zone_b[count.index].id
  route_table_id = aws_route_table.eks_cluster.id
}

resource "aws_route_table_association" "documentdb_cluster_zone_c" {
  count          = length(var.documentdb_subnets_zone_c)
  subnet_id      = aws_subnet.documentdb_zone_c[count.index].id
  route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_docdb_subnet_group" "documentdb" {
description = "DocumentDB linked to ${var.kubernetes_cluster_id}"
name = "documentdb-${aws_vpc.eks.id}"
subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id])
tags = local.tags_documentdb
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "documentdb_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow DocumentDB incoming access from anywhere"
from_port = 27017
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster_workers.id
to_port = 27017
type = "ingress"
}

View File

@@ -0,0 +1,42 @@
data "aws_availability_zones" "available" {}
locals {
  # Base tags for all EKS VPC networking resources; the kubernetes.io tags are
  # required so the cluster and ELB controller can discover these subnets.
  tags_eks_vpc = merge(
    local.tags_common,
    {
      Name = "qovery-eks-workers",
      "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared",
      "kubernetes.io/role/elb" = 1,
      {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %}
    }
  )
  tags_eks_vpc_public = merge(
    local.tags_eks_vpc,
    {
      "Public" = "true"
    }
  )
  # Fix: derive the private variant from tags_eks_vpc (as the public variant
  # does). The original merged local.tags_eks, which would silently drop the
  # Name and kubernetes.io/* discovery tags from private subnets.
  tags_eks_vpc_private = merge(
    local.tags_eks_vpc,
    {
      "Public" = "false"
    }
  )
}
# VPC
resource "aws_vpc" "eks" {
cidr_block = var.vpc_cidr_block
enable_dns_hostnames = true
tags = local.tags_eks_vpc
}
# Internet gateway
resource "aws_internet_gateway" "eks_cluster" {
vpc_id = aws_vpc.eks.id
tags = local.tags_eks_vpc
}

View File

@@ -0,0 +1,75 @@
{% if vpc_qovery_network_mode == "WithoutNatGateways" %}
# Public subnets
resource "aws_subnet" "eks_zone_a" {
count = length(var.eks_subnets_zone_a_private)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.eks_subnets_zone_a_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc
}
resource "aws_subnet" "eks_zone_b" {
count = length(var.eks_subnets_zone_b_private)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.eks_subnets_zone_b_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc
}
resource "aws_subnet" "eks_zone_c" {
count = length(var.eks_subnets_zone_c_private)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.eks_subnets_zone_c_private[count.index]
vpc_id = aws_vpc.eks.id
map_public_ip_on_launch = true
tags = local.tags_eks_vpc
}
resource "aws_route_table" "eks_cluster" {
vpc_id = aws_vpc.eks.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.eks_cluster.id
}
// todo(pmavro): add tests for it when it will be available in the SDK
{% for route in vpc_custom_routing_table %}
route {
cidr_block = "{{ route.destination }}"
gateway_id = "{{ route.target }}"
}
{% endfor %}
tags = local.tags_eks_vpc
}
resource "aws_route_table_association" "eks_cluster_zone_a" {
count = length(var.eks_subnets_zone_a_private)
subnet_id = aws_subnet.eks_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_b" {
count = length(var.eks_subnets_zone_b_private)
subnet_id = aws_subnet.eks_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "eks_cluster_zone_c" {
count = length(var.eks_subnets_zone_c_private)
subnet_id = aws_subnet.eks_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
{% endif %}

View File

@@ -0,0 +1,80 @@
locals {
tags_elasticache = merge(
aws_eks_cluster.eks_cluster.tags,
{
"Service" = "Elasticache"
}
)
}
# Network
resource "aws_subnet" "elasticache_zone_a" {
count = length(var.elasticache_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.elasticache_subnets_zone_a[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticache
}
resource "aws_subnet" "elasticache_zone_b" {
count = length(var.elasticache_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.elasticache_subnets_zone_b[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticache
}
resource "aws_subnet" "elasticache_zone_c" {
count = length(var.elasticache_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.elasticache_subnets_zone_c[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticache
}
resource "aws_route_table_association" "elasticache_cluster_zone_a" {
count = length(var.elasticache_subnets_zone_a)
subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "elasticache_cluster_zone_b" {
count = length(var.elasticache_subnets_zone_b)
subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "elasticache_cluster_zone_c" {
count = length(var.elasticache_subnets_zone_c)
subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_elasticache_subnet_group" "elasticache" {
  description = "Elasticache linked to ${var.kubernetes_cluster_id}"
  # WARNING: this "name" value is used by elasticache clusters, you need to update them accordingly
  name = "elasticache-${aws_vpc.eks.id}"
  subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id])
  # Consistency: the documentdb and rds subnet groups carry the per-service
  # tags; tag this one the same way (supported since AWS provider 3.x).
  tags = local.tags_elasticache
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "elasticache_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow Redis incoming access from anywhere"
from_port = 6379
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster_workers.id
to_port = 6379
type = "ingress"
}

View File

@@ -0,0 +1,79 @@
locals {
tags_elasticsearch = merge(
local.tags_eks,
{
"Service" = "Elasticsearch"
}
)
}
# Network
resource "aws_subnet" "elasticsearch_zone_a" {
count = length(var.elasticsearch_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.elasticsearch_subnets_zone_a[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticsearch
}
resource "aws_subnet" "elasticsearch_zone_b" {
count = length(var.elasticsearch_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.elasticsearch_subnets_zone_b[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticsearch
}
resource "aws_subnet" "elasticsearch_zone_c" {
count = length(var.elasticsearch_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.elasticsearch_subnets_zone_c[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_elasticsearch
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_a" {
count = length(var.elasticsearch_subnets_zone_a)
subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_b" {
count = length(var.elasticsearch_subnets_zone_b)
subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_c" {
count = length(var.elasticsearch_subnets_zone_c)
subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_security_group" "elasticsearch" {
name = "elasticsearch-${var.kubernetes_cluster_id}"
description = "Elasticsearch security group"
vpc_id = aws_vpc.eks.id
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [
aws_vpc.eks.cidr_block
]
}
tags = local.tags_elasticsearch
}

View File

@@ -0,0 +1,29 @@
locals {
kubeconfig_base64 = base64encode(local.kubeconfig)
}
// Do not run this for test clusters, to avoid leaving behind uncleaned secrets.
// Do not try to use `count` on this resource: it fails while trying to connect to Vault.
{% if vault_auth_method != "none" and not test_cluster %}
resource "vault_generic_secret" "cluster-access" {
path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}"
data_json = <<EOT
{
"cloud_provider": "${var.cloud_provider}",
"cluster_name": "${var.kubernetes_cluster_name}",
"KUBECONFIG_b64": "${local.kubeconfig_base64}",
"organization_id": "${var.organization_id}",
"test_cluster": "${var.test_cluster}",
"grafana_login": "{{ grafana_admin_user }}",
"grafana_password": "{{ grafana_admin_password }}",
"AWS_ACCESS_KEY_ID": "{{ aws_access_key }}",
"AWS_SECRET_ACCESS_KEY": "{{ aws_secret_key }}",
"AWS_DEFAULT_REGION": "{{ aws_region }}"
}
EOT
depends_on = [
aws_eks_cluster.eks_cluster,
]
}
{% endif %}

118
lib/edge/aws/rds.tf Normal file
View File

@@ -0,0 +1,118 @@
data "aws_iam_policy_document" "rds_enhanced_monitoring" {
statement {
actions = [
"sts:AssumeRole",
]
effect = "Allow"
principals {
type = "Service"
identifiers = ["monitoring.rds.amazonaws.com"]
}
}
}
locals {
tags_rds = merge(
aws_eks_cluster.eks_cluster.tags,
{
"Service" = "RDS"
}
)
}
# Network
resource "aws_subnet" "rds_zone_a" {
count = length(var.rds_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.rds_subnets_zone_a[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_rds
}
resource "aws_subnet" "rds_zone_b" {
count = length(var.rds_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.rds_subnets_zone_b[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_rds
}
resource "aws_subnet" "rds_zone_c" {
count = length(var.rds_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.rds_subnets_zone_c[count.index]
vpc_id = aws_vpc.eks.id
tags = local.tags_rds
}
resource "aws_route_table_association" "rds_cluster_zone_a" {
count = length(var.rds_subnets_zone_a)
subnet_id = aws_subnet.rds_zone_a.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "rds_cluster_zone_b" {
count = length(var.rds_subnets_zone_b)
subnet_id = aws_subnet.rds_zone_b.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
resource "aws_route_table_association" "rds_cluster_zone_c" {
count = length(var.rds_subnets_zone_c)
subnet_id = aws_subnet.rds_zone_c.*.id[count.index]
route_table_id = aws_route_table.eks_cluster.id
}
# Subnet group gathering the RDS subnets of all three availability zones.
resource "aws_db_subnet_group" "rds" {
description = "RDS linked to ${var.kubernetes_cluster_id}"
# NOTE(review): unlike documentdb ("documentdb-${vpc}") and elasticache
# ("elasticache-${vpc}"), this name is the bare VPC id — confirm whether an
# "rds-" prefix was intended before anything starts depending on this name.
name = aws_vpc.eks.id
subnet_ids = flatten([aws_subnet.rds_zone_a.*.id, aws_subnet.rds_zone_b.*.id, aws_subnet.rds_zone_c.*.id])
tags = local.tags_rds
}
# IAM
resource "aws_iam_role" "rds_enhanced_monitoring" {
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}"
assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json
tags = local.tags_rds
}
resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" {
role = aws_iam_role.rds_enhanced_monitoring.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "postgres_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow RDS PostgreSQL incoming access from anywhere"
from_port = 5432
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster_workers.id
to_port = 5432
type = "ingress"
}
resource "aws_security_group_rule" "mysql_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow RDS MySQL incoming access from anywhere"
from_port = 3306
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster_workers.id
to_port = 3306
type = "ingress"
}

View File

@@ -0,0 +1,44 @@
// S3 bucket to store kubeconfigs
// Versioned and KMS-encrypted; force_destroy lets `terraform destroy` remove
// the bucket even when kubeconfig objects are still present.
// NOTE(review): the inline acl/versioning/server_side_encryption_configuration
// blocks are valid for the pinned ~> 3.66 AWS provider but are split into
// separate resources in provider v4+ — revisit on provider upgrade.
resource "aws_s3_bucket" "kubeconfigs_bucket" {
bucket = var.s3_bucket_kubeconfig
acl = "private"
force_destroy = true
versioning {
enabled = true
}
tags = merge(
local.tags_eks,
{
"Name" = "Kubernetes kubeconfig"
}
)
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
// Encrypt with the dedicated customer-managed KMS key declared below.
kms_master_key_id = aws_kms_key.s3_kubeconfig_kms_encryption.arn
sse_algorithm = "aws:kms"
}
}
}
}
resource "aws_kms_key" "s3_kubeconfig_kms_encryption" {
description = "s3 kubeconfig encryption"
tags = merge(
local.tags_eks,
{
"Name" = "Kubeconfig Encryption"
}
)
}
resource "aws_s3_bucket_public_access_block" "kubeconfigs_access" {
bucket = aws_s3_bucket.kubeconfigs_bucket.id
ignore_public_acls = true
restrict_public_buckets = true
block_public_policy = true
block_public_acls = true
}

View File

@@ -0,0 +1,319 @@
# Qovery
variable "cloud_provider" {
description = "Cloud provider name"
default = "aws"
type = string
}
variable "region" {
description = "AWS region to store terraform state and lock"
default = "{{ aws_region }}"
type = string
}
variable "organization_id" {
description = "Qovery Organization ID"
default = "{{ organization_id }}"
type = string
}
variable "qovery_nats_url" {
description = "URL of qovery nats server"
default = "{{ qovery_nats_url }}"
type = string
}
variable "qovery_nats_user" {
description = "user of qovery nats server"
default = "{{ qovery_nats_user }}"
type = string
}
variable "qovery_nats_password" {
description = "password of qovery nats server"
default = "{{ qovery_nats_password }}"
type = string
}
variable "test_cluster" {
description = "Is this a test cluster?"
default = "{{ test_cluster }}"
type = string
}
# AWS specific
variable "aws_availability_zones" {
description = "AWS availability zones"
default = {{ aws_availability_zones }}
type = list(string)
}
variable "vpc_cidr_block" {
description = "VPC CIDR block"
default = "{{ vpc_cidr_block }}"
type = string
}
# Kubernetes
variable "eks_subnets_zone_a_private" {
description = "EKS private subnets Zone A"
default = {{ eks_zone_a_subnet_blocks_private }}
type = list(string)
}
variable "eks_subnets_zone_b_private" {
description = "EKS private subnets Zone B"
default = {{ eks_zone_b_subnet_blocks_private }}
type = list(string)
}
variable "eks_subnets_zone_c_private" {
description = "EKS private subnets Zone C"
default = {{ eks_zone_c_subnet_blocks_private }}
type = list(string)
}
{% if vpc_qovery_network_mode == "WithNatGateways" %}
variable "eks_subnets_zone_a_public" {
description = "EKS public subnets Zone A"
default = {{ eks_zone_a_subnet_blocks_public }}
type = list(string)
}
variable "eks_subnets_zone_b_public" {
description = "EKS public subnets Zone B"
default = {{ eks_zone_b_subnet_blocks_public }}
type = list(string)
}
variable "eks_subnets_zone_c_public" {
description = "EKS public subnets Zone C"
default = {{ eks_zone_c_subnet_blocks_public }}
type = list(string)
}
{% endif %}
variable "eks_cidr_subnet" {
description = "EKS CIDR (x.x.x.x/CIDR)"
default = {{ eks_cidr_subnet }}
type = number
}
variable "eks_k8s_versions" {
description = "Kubernetes version"
default = {
"masters": "{{ eks_masters_version }}",
"workers": "{{ eks_workers_version }}",
}
type = map(string)
}
variable "kubernetes_full_cluster_id" {
description = "Kubernetes full cluster id"
default = "{{ kubernetes_full_cluster_id }}"
type = string
}
variable "kubernetes_cluster_id" {
description = "Kubernetes cluster id"
default = "{{ kubernetes_cluster_id }}"
type = string
}
variable "kubernetes_cluster_name" {
description = "Kubernetes cluster name"
default = "qovery-{{ kubernetes_cluster_id }}"
type = string
}
variable "eks_access_cidr_blocks" {
description = "Kubernetes CIDR Block"
default = {{ eks_access_cidr_blocks }}
type = list(string)
}
variable "eks_cloudwatch_log_group" {
description = "AWS cloudwatch log group for EKS"
default = "qovery-{{ eks_cloudwatch_log_group }}"
type = string
}
# S3 bucket name
variable "s3_bucket_kubeconfig" {
description = "S3 bucket containing kubeconfigs"
default = "{{ s3_kubeconfig_bucket }}"
type = string
}
# Engine info
variable "qovery_engine_info" {
description = "Qovery engine info"
default = {
"token" = "{{ engine_version_controller_token }}"
"api_fqdn" = "{{ qovery_api_url }}"
}
type = map(string)
}
variable "qovery_engine_replicas" {
description = "This variable is used to get random ID generated for the engine"
default = "2"
type = number
}
# Agent info
variable "qovery_agent_info" {
description = "Qovery agent info"
default = {
"token" = "{{ agent_version_controller_token }}"
"api_fqdn" = "{{ qovery_api_url }}"
}
type = map(string)
}
variable "qovery_agent_replicas" {
description = "This variable is used to get random ID generated for the agent"
default = "1"
type = number
}
# RDS
variable "rds_subnets_zone_a" {
description = "RDS subnets Zone A"
default = {{ rds_zone_a_subnet_blocks }}
type = list(string)
}
variable "rds_subnets_zone_b" {
description = "RDS subnets Zone B"
default = {{ rds_zone_b_subnet_blocks }}
type = list(string)
}
variable "rds_subnets_zone_c" {
description = "RDS subnets Zone C"
default = {{ rds_zone_c_subnet_blocks }}
type = list(string)
}
variable "rds_cidr_subnet" {
description = "RDS CIDR (x.x.x.x/CIDR)"
default = {{ rds_cidr_subnet }}
type = number
}
# DocumentDB
variable "documentdb_subnets_zone_a" {
description = "DocumentDB subnets Zone A"
default = {{ documentdb_zone_a_subnet_blocks }}
type = list(string)
}
variable "documentdb_subnets_zone_b" {
description = "DocumentDB subnets Zone B"
default = {{ documentdb_zone_b_subnet_blocks }}
type = list(string)
}
variable "documentdb_subnets_zone_c" {
description = "DocumentDB subnets Zone C"
default = {{ documentdb_zone_c_subnet_blocks }}
type = list(string)
}
variable "documentdb_cidr_subnet" {
description = "DocumentDB CIDR (x.x.x.x/CIDR)"
default = {{ documentdb_cidr_subnet }}
type = number
}
# Elasticache
variable "elasticache_subnets_zone_a" {
description = "Elasticache subnets Zone A"
default = {{ elasticache_zone_a_subnet_blocks }}
type = list(string)
}
variable "elasticache_subnets_zone_b" {
description = "Elasticache subnets Zone B"
default = {{ elasticache_zone_b_subnet_blocks }}
type = list(string)
}
variable "elasticache_subnets_zone_c" {
description = "Elasticache subnets Zone C"
default = {{ elasticache_zone_c_subnet_blocks }}
type = list(string)
}
variable "elasticache_cidr_subnet" {
description = "Elasticache CIDR (x.x.x.x/CIDR)"
default = {{ elasticache_cidr_subnet }}
type = number
}
# Elasticsearch
variable "elasticsearch_subnets_zone_a" {
description = "Elasticsearch subnets Zone A"
default = {{ elasticsearch_zone_a_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_subnets_zone_b" {
description = "Elasticsearch subnets Zone B"
default = {{ elasticsearch_zone_b_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_subnets_zone_c" {
description = "Elasticsearch subnets Zone C"
default = {{ elasticsearch_zone_c_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_cidr_subnet" {
description = "Elasticsearch CIDR (x.x.x.x/CIDR)"
default = {{ elasticsearch_cidr_subnet }}
type = number
}
# Helm alert manager discord
variable "discord_api_key" {
description = "discord url with token for used for alerting"
default = "{{ discord_api_key }}"
type = string
}
# Qovery features
variable "log_history_enabled" {
description = "Enable log history"
default = {{ log_history_enabled }}
type = bool
}
variable "metrics_history_enabled" {
description = "Enable metrics history"
default = {{ metrics_history_enabled }}
type = bool
}
{%- if resource_expiration_in_seconds is defined %}
# Pleco ttl
variable "resource_expiration_in_seconds" {
description = "Resource expiration in seconds"
default = {{ resource_expiration_in_seconds }}
type = number
}
{% endif %}

View File

@@ -0,0 +1,60 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.66.0"
}
external = {
source = "hashicorp/external"
version = "~> 1.2"
}
vault = {
source = "hashicorp/vault"
version = "~> 2.24.1"
}
local = {
source = "hashicorp/local"
version = "~> 1.4"
}
null = {
source = "hashicorp/null"
version = "~> 2.1"
}
random = {
source = "hashicorp/random"
version = "~> 2.3"
}
time = {
source = "hashicorp/time"
version = "~> 0.3"
}
}
required_version = ">= 0.13"
}
provider "aws" {
profile = "default"
access_key = "{{ aws_access_key }}"
secret_key = "{{ aws_secret_key }}"
region = "{{ aws_region }}"
}
provider "aws" {
alias = "tfstates"
access_key = "{{ aws_access_key_tfstates_account }}"
secret_key = "{{ aws_secret_key_tfstates_account }}"
region = "{{ aws_region_tfstates_account }}"
}
provider "vault" {
{% if vault_auth_method == "app_role" and not test_cluster %}
auth_login {
path = "auth/approle/login"
parameters = {
role_id = "{{ vault_role_id }}"
secret_id = "{{ vault_secret_id }}"
}
}
{% endif %}
}

View File

@@ -1,4 +1,4 @@
use crate::cloud_provider::Kind as KindModel;
use crate::cloud_provider::{Edge, Kind as KindModel};
use serde_derive::{Deserialize, Serialize};
#[derive(Deserialize, Serialize)]
@@ -7,6 +7,7 @@ pub enum Kind {
Aws,
Do,
Scw,
Edge(Edge),
}
impl From<KindModel> for Kind {
@@ -15,6 +16,7 @@ impl From<KindModel> for Kind {
KindModel::Aws => Kind::Aws,
KindModel::Do => Kind::Do,
KindModel::Scw => Kind::Scw,
KindModel::Edge(Edge::Aws) => Kind::Edge(Edge::Aws),
}
}
}

View File

@@ -52,6 +52,12 @@ pub enum Kind {
Aws,
Do,
Scw,
Edge(Edge),
}
/// Edge flavour of a cloud provider (lightweight clusters deployed on a
/// provider's infrastructure rather than its managed Kubernetes offering).
#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Edge {
/// AWS-backed edge cluster.
Aws,
}
impl Display for Kind {
@@ -60,6 +66,7 @@ impl Display for Kind {
Kind::Aws => "AWS",
Kind::Do => "Digital Ocean",
Kind::Scw => "Scaleway",
Kind::Edge(Edge::Aws) => "Edge AWS",
})
}
}

View File

@@ -16,8 +16,8 @@ use url::Url;
use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey};
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::service::{DatabaseOptions, RouterService};
use crate::cloud_provider::Kind as CPKind;
use crate::cloud_provider::{service, CloudProvider};
use crate::cloud_provider::{Edge, Kind as CPKind, Kind};
use crate::cmd::docker::Docker;
use crate::container_registry::ContainerRegistryInfo;
use crate::logger::Logger;
@@ -295,6 +295,25 @@ impl Application {
listeners,
logger.clone(),
)?)),
Kind::Edge(Edge::Aws) => Ok(Box::new(models::application::Application::<AWS>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
self.ports.clone(),
self.total_cpus.clone(),
self.cpu_burst.clone(),
self.total_ram_in_mib,
self.min_instances,
self.max_instances,
build,
self.storage.iter().map(|s| s.to_aws_storage()).collect::<Vec<_>>(),
environment_variables,
self.advance_settings.clone(),
AwsAppExtraSettings {},
listeners,
logger.clone(),
)?)),
}
}
@@ -586,6 +605,22 @@ impl Router {
)?);
Ok(router)
}
Kind::Edge(Edge::Aws) => {
let router = Box::new(models::router::Router::<AWS>::new(
context.clone(),
self.id.as_str(),
self.name.as_str(),
self.action.to_service_action(),
self.default_domain.as_str(),
custom_domains,
routes,
self.sticky_sessions_enabled,
AwsRouterExtraSettings {},
listeners,
logger,
)?);
Ok(router)
}
}
}
}
@@ -1064,6 +1099,176 @@ impl Database {
service::DatabaseType::MongoDB,
SCW::full_name().to_string(),
)),
(CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::MANAGED) => {
let db = models::database::Database::<AWS, Managed, PostgresSQL>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => {
let db = models::database::Database::<AWS, Container, PostgresSQL>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::MANAGED) => {
let db = models::database::Database::<AWS, Managed, MySQL>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::CONTAINER) => {
let db = models::database::Database::<AWS, Container, MySQL>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::MANAGED) => {
let db = models::database::Database::<AWS, Managed, Redis>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::CONTAINER) => {
let db = models::database::Database::<AWS, Container, Redis>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::MANAGED) => {
let db = models::database::Database::<AWS, Managed, MongoDB>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
(CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => {
let db = models::database::Database::<AWS, Container, MongoDB>::new(
context.clone(),
self.id.as_str(),
self.action.to_service_action(),
self.name.as_str(),
version,
self.fqdn.as_str(),
self.fqdn_id.as_str(),
self.total_cpus.clone(),
self.total_ram_in_mib,
self.database_instance_type.as_str(),
database_options.publicly_accessible,
database_options.port,
database_options,
listeners,
logger,
)?;
Ok(Box::new(db))
}
}
}
}

View File

@@ -13,6 +13,7 @@ use qovery_engine::io_models::{
use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION};
use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION};
use crate::edge_aws_rs::AWS_K3S_VERSION;
use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE};
use crate::utilities::{
db_disk_type, db_infos, db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init,
@@ -29,7 +30,7 @@ use qovery_engine::cloud_provider::kubernetes::Kubernetes;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::cloud_provider::{CloudProvider, Kind};
use qovery_engine::cloud_provider::{CloudProvider, Edge, Kind};
use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas;
use qovery_engine::cmd::structs::SVCItem;
use qovery_engine::engine::EngineConfig;
@@ -1135,10 +1136,11 @@ pub fn test_db(
Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()),
Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()),
Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()),
Kind::Edge(Edge::Aws) => (AWS_TEST_REGION.to_string(), AWS_K3S_VERSION.to_string()),
};
let engine_config = match provider_kind {
Kind::Aws => AWS::docker_cr_engine(
Kind::Aws | Kind::Edge(Edge::Aws) => AWS::docker_cr_engine(
&context,
logger.clone(),
localisation.as_str(),

View File

@@ -0,0 +1 @@
pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1";

View File

@@ -7,5 +7,6 @@ pub mod aws;
pub mod cloudflare;
pub mod common;
pub mod digitalocean;
pub mod edge_aws_rs;
pub mod scaleway;
pub mod utilities;

View File

@@ -0,0 +1,65 @@
extern crate test_utilities;
use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION};
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger};
use ::function_name::named;
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode;
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways};
use qovery_engine::cloud_provider::aws::regions::AwsRegion;
use qovery_engine::cloud_provider::Kind;
use std::str::FromStr;
use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType};
pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1";
/// Boots an edge (k3s-on-AWS) cluster in `region`, runs the requested cluster
/// test scenario, then tears everything down.
///
/// * `major_boot_version` / `minor_boot_version` - kubernetes version the
///   cluster is initially created with (used by upgrade-style test types).
/// * `vpc_network_mode` - whether subnets are routed through NAT gateways.
#[cfg(feature = "test-aws-infra")]
fn create_and_destroy_edge_aws_cluster(
region: String,
test_type: ClusterTestType,
major_boot_version: u8,
minor_boot_version: u8,
vpc_network_mode: VpcQoveryNetworkMode,
test_name: &str,
) {
engine_run_test(|| {
let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region");
let zones = region.get_zones();
cluster_test(
test_name,
// NOTE(review): this passes Kind::Aws even though the helper targets the
// edge flavour — confirm whether Kind::Edge(Edge::Aws) was intended here.
Kind::Aws,
context(
generate_id().as_str(),
generate_cluster_id(region.to_string().as_str()).as_str(),
),
logger(),
region.to_aws_format().as_str(),
Some(zones),
test_type,
major_boot_version,
minor_boot_version,
&ClusterDomain::Default,
Option::from(vpc_network_mode),
None,
)
})
}
/*
TEST NOTES:
It is useful to keep 2 cluster deployment tests running in parallel to validate there is no name collision (overlapping).
*/
/// Creates then destroys an edge (k3s) cluster in eu-west-3 without NAT gateways.
#[cfg(feature = "test-aws-infra")]
#[named]
#[test]
fn create_and_destroy_edge_aws_cluster_eu_west_3() {
    let region = "eu-west-3".to_string();
    // Fix: call the edge helper defined above — the original invoked the
    // non-existent `create_and_destroy_eks_cluster` — and use the imported
    // AWS boot-version constants instead of the undeclared K3S_MAJOR_VERSION /
    // K3S_MINOR_VERSION identifiers.
    create_and_destroy_edge_aws_cluster(
        region,
        ClusterTestType::Classic,
        AWS_KUBERNETES_MAJOR_VERSION,
        AWS_KUBERNETES_MINOR_VERSION,
        WithoutNatGateways,
        function_name!(),
    );
}

1
tests/edge/aws/mod.rs Normal file
View File

@@ -0,0 +1 @@
mod edge_aws_kubernetes;

1
tests/edge/mod.rs Normal file
View File

@@ -0,0 +1 @@
mod aws;

View File

@@ -3,4 +3,5 @@ extern crate maplit;
mod aws;
mod digitalocean;
mod edge;
mod scaleway;