feat: connect AWS helm levels 1 to 3

Author: Pierre Mavro
Date: 2021-06-07 21:24:53 +02:00
Committed by: Pierre Mavro
Parent: 5bfcc8f19a
Commit: b3118a7a68
44 changed files with 1398 additions and 2588 deletions

Cargo.lock (generated): 1183 lines changed. Diff suppressed because it is too large.

View File

@@ -16,11 +16,11 @@ base64 = "0.13.0"
dirs = "3.0.2"
rust-crypto = "0.2.36"
retry = "1.2.1"
trust-dns-resolver = "0.20.3"
rand = "0.8.3"
trust-dns-resolver = "0.19.6"
rand = "0.7.3"
gethostname = "0.2.1"
reqwest = { version = "0.11.3", features = ["blocking"] }
futures = "0.3.15"
reqwest = { version = "0.10.8", features = ["blocking"] }
futures = "0.3"
timeout-readwrite = "0.3.1"
lazy_static = "1.4.0"
@@ -31,23 +31,23 @@ flate2 = "1.0.20" # tar gz
tar = "0.4.35"
# logger
tracing = "0.1.26"
tracing-subscriber = "0.2.18"
tracing = "0.1"
tracing-subscriber = "0.2"
# Docker deps
# shiplift = "0.6.0"
# Filesystem
sysinfo = "0.18.2"
sysinfo = "0.16.4"
# Jinja2
tera = "1.10.0"
# Json
serde = "1.0.126"
serde_json = "1.0.64"
serde_derive = "1.0.126"
serde = "1.0.114"
serde_json = "1.0.57"
serde_derive = "1.0"
# AWS deps
tokio = { version = "1.6.1", features = ["full"] }
tokio = { version = "1.5.0", features = ["full"] }
rusoto_core = "0.46.0"
rusoto_sts = "0.46.0"
rusoto_credential = "0.46.0"

View File

@@ -0,0 +1,20 @@
enabledFeatures:
  disableDryRun: true
  checkInterval: 120
  kubernetes: "in"
  awsRegions:
    - eu-west-3
    - us-east-2
  rds: true
  documentdb: true
  elasticache: true
  eks: true
  elb: true
  ebs: true
  vpc: true
  s3: true
  kms: true
  cloudwatchLogs: true
  iam: true
  sshKeys: true
  ecr: true

View File

@@ -21,7 +21,7 @@ resource "aws_cloudwatch_log_group" "eks_cloudwatch_log_group" {
}
resource "aws_eks_cluster" "eks_cluster" {
name = "qovery-${var.kubernetes_cluster_id}"
name = var.kubernetes_cluster_name
role_arn = aws_iam_role.eks_cluster.arn
version = var.eks_k8s_versions.masters

View File

@@ -1,50 +0,0 @@
//resource "helm_release" "alertmanager_discord" {
// name = "alertmanager-discord"
// chart = "common/charts/alertmanager-discord"
// namespace = "prometheus"
// create_namespace = true
// atomic = true
// max_history = 50
//
// set {
// name = "replicaCount"
// value = "1"
// }
//
// # Interrupt channel
// set {
// name = "application.environmentVariables.DISCORD_WEBHOOK"
// value = var.discord_api_key
// }
//
// set {
// name = "resources.limits.cpu"
// value = "50m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "50m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "50Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "50Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// ]
//}

View File

@@ -31,63 +31,4 @@ resource "aws_iam_user_policy" "iam_eks_user_mapper" {
]
}
EOF
}
//resource "helm_release" "iam_eks_user_mapper" {
// name = "iam-eks-user-mapper"
// chart = "charts/iam-eks-user-mapper"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "aws.accessKey"
// value = aws_iam_access_key.iam_eks_user_mapper.id
// }
//
// set {
// name = "aws.secretKey"
// value = aws_iam_access_key.iam_eks_user_mapper.secret
// }
//
// set {
// name = "aws.region"
// value = var.region
// }
//
// set {
// name = "syncIamGroup"
// value = "Admins"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "20m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "10m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "32Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "32Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// ]
//}
}

View File

@@ -1,65 +0,0 @@
// temporarily removed: we need to check the business support plan first before deploying it,
// otherwise it crashes all the time
//resource "aws_iam_user" "iam_aws_limits_exporter" {
// name = "qovery-aws-limits-exporter-${var.kubernetes_cluster_id}"
//
// tags = local.tags_eks
//}
//
//resource "aws_iam_access_key" "iam_aws_limits_exporter" {
// user = aws_iam_user.iam_aws_limits_exporter.name
//}
//
//resource "aws_iam_user_policy" "iam_aws_limits_exporter" {
// name = aws_iam_user.iam_aws_limits_exporter.name
// user = aws_iam_user.iam_aws_limits_exporter.name
//
// policy = <<EOF
//{
// "Version": "2012-10-17",
// "Statement": [
// {
// "Effect": "Allow",
// "Action": [
// "support:*"
// ],
// "Resource": [
// "*"
// ]
// }
// ]
//}
//EOF
//}
//
//resource "helm_release" "iam_aws_limits_exporter" {
// name = "aws-limits-exporter"
// chart = "charts/aws-limits-exporter"
// namespace = "prometheus"
// create_namespace = true
// atomic = true
// max_history = 50
//
// // We can't activate it until we get the support info into the metadata field
// // make a fake arg to prevent Terraform from validating the update on failure because of the atomic option
//// set {
//// name = "fake"
//// value = timestamp()
//// }
//
// set {
// name = "awsCredentials.awsAccessKey"
// value = aws_iam_access_key.iam_aws_limits_exporter.id
// }
//
// set {
// name = "awsCredentials.awsSecretKey"
// value = aws_iam_access_key.iam_aws_limits_exporter.secret
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.cluster_autoscaler,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -1,63 +0,0 @@
//resource "helm_release" "aws_node_term_handler" {
// name = "aws-node-term-handler"
// chart = "charts/aws-node-termination-handler"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "nameOverride"
// value = "aws-node-term-handler"
// }
//
// set {
// name = "fullnameOverride"
// value = "aws-node-term-handler"
// }
//
// set {
// name = "image.tag"
// value = "v1.5.0"
// }
//
// set {
// name = "enableSpotInterruptionDraining"
// value = "true"
// }
//
// set {
// name = "enableScheduledEventDraining"
// value = "true"
// }
//
// set {
// name = "deleteLocalData"
// value = "true"
// }
//
// set {
// name = "ignoreDaemonSets"
// value = "true"
// }
//
// set {
// name = "podTerminationGracePeriod"
// value = "300"
// }
//
// set {
// name = "nodeTerminationGracePeriod"
// value = "120"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// ]
//}

View File

@@ -1,119 +0,0 @@
//locals {
// aws_cni_chart_release_name = "aws-vpc-cni"
//}
//
//data "external" "is_cni_old_installed_version" {
// program = ["./helper.sh", "is_cni_old_installed_version"]
// depends_on = [
// aws_eks_cluster.eks_cluster,
// null_resource.enable_cni_managed_by_helm,
// ]
//}
//
//# On the first boot, it's required to remove the existing CNI resources to get them managed by helm
//resource "null_resource" "enable_cni_managed_by_helm" {
// provisioner "local-exec" {
// command = <<EOT
//./helper.sh enable_cni_managed_by_helm
//EOT
//
// environment = {
// KUBECONFIG = local_file.kubeconfig.filename
// AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
// AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
// AWS_DEFAULT_REGION = "{{ aws_region }}"
// }
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// ]
//}
//
//locals {
// aws_cni = <<CNI
//crd:
// create: false
//CNI
//}
//
//resource "helm_release" "aws_vpc_cni" {
// name = local.aws_cni_chart_release_name
// chart = "charts/aws-vpc-cni"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// values = [
// local.aws_cni,
// ]
//
// set {
// name = "image.region"
// value = var.region
// type = "string"
// }
//
// set {
// name = "image.pullPolicy"
// value = "IfNotPresent"
// type = "string"
// }
//
// set {
// name = "originalMatchLabels"
// value = data.external.is_cni_old_installed_version.result.is_cni_old_installed_version
// type = "string"
// }
//
// # label ENIs
// set {
// name = "env.CLUSTER_NAME"
// value = var.kubernetes_cluster_name
// type = "string"
// }
//
// ## POD ALLOCATION ##
// # number of total IP addresses that the daemon should attempt to allocate for pod assignment on the node (init phase)
// set {
// name = "env.MINIMUM_IP_TARGET"
// value = "60"
// type = "string"
// }
//
// # number of free IP addresses that the daemon should attempt to keep available for pod assignment on the node
// set {
// name = "env.WARM_IP_TARGET"
// value = "10"
// type = "string"
// }
//
// # maximum number of ENIs that will be attached to the node (k8s recommend to avoid going over 100)
// set {
// name = "env.MAX_ENI"
// value = "100"
// type = "string"
// }
//
// # Limits
// set {
// name = "resources.requests.cpu"
// value = "50m"
// type = "string"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// type = "string"
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// null_resource.enable_cni_managed_by_helm,
// data.external.is_cni_old_installed_version,
// {% if not test_cluster %}
// vault_generic_secret.cluster-access,
// {% endif %}
// ]
//}

View File

@@ -1,18 +0,0 @@
//
//resource "helm_release" "calico" {
// name = "calico"
// chart = "charts/aws-calico"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -1,158 +0,0 @@
//resource "helm_release" "cert_manager" {
// name = "cert-manager"
// chart = "common/charts/cert-manager"
// namespace = "cert-manager"
// create_namespace = true
// atomic = true
// max_history = 50
// timeout = 480
//
// values = [file("chart_values/cert-manager.yaml")]
//
// set {
// name = "installCRDs"
// value = "true"
// }
//
// set {
// name = "replicaCount"
// value = "2"
// }
//
// # https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check
// set {
// name = "extraArgs"
// value = "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}"
// }
//
// set {
// name = "prometheus.servicemonitor.enabled"
// value = var.metrics_history_enabled
// }
//
// set {
// name = "prometheus.servicemonitor.prometheusInstance"
// value = "qovery"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "200m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "1Gi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "1Gi"
// }
//
// # Limits webhook
// set {
// name = "webhook.resources.limits.cpu"
// value = "20m"
// }
//
// set {
// name = "webhook.resources.requests.cpu"
// value = "20m"
// }
//
// set {
// name = "webhook.resources.limits.memory"
// value = "128Mi"
// }
//
// set {
// name = "webhook.resources.requests.memory"
// value = "128Mi"
// }
//
// # Limits cainjector
// set {
// name = "cainjector.resources.limits.cpu"
// value = "500m"
// }
//
// set {
// name = "cainjector.resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "cainjector.resources.limits.memory"
// value = "1Gi"
// }
//
// set {
// name = "cainjector.resources.requests.memory"
// value = "1Gi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.cluster_autoscaler,
// helm_release.aws_vpc_cni,
// ]
//}
//
//resource "helm_release" "cert_manager_config" {
// name = "cert-manager-configs"
// chart = "common/charts/cert-manager-configs"
// namespace = "cert-manager"
// atomic = true
// max_history = 50
//
// depends_on = [helm_release.cert_manager]
//
// set {
// name = "externalDnsProvider"
// value = "{{ external_dns_provider }}"
// }
//
// set {
// name = "acme.letsEncrypt.emailReport"
// value = "{{ dns_email_report }}"
// }
//
// set {
// name = "acme.letsEncrypt.acmeUrl"
// value = "{{ acme_server_url }}"
// }
//
// set {
// name = "managedDns"
// value = "{{ managed_dns_domains_terraform_format }}"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
//{% if external_dns_provider == "cloudflare" %}
// set {
// name = "provider.cloudflare.apiToken"
// value = "{{ cloudflare_api_token }}"
// }
//
// set {
// name = "provider.cloudflare.email"
// value = "{{ cloudflare_email }}"
// }
//{% endif %}
//}

View File

@@ -35,108 +35,4 @@ POLICY
resource "aws_iam_user_policy_attachment" "s3_cluster_autoscaler_attachment" {
user = aws_iam_user.iam_eks_cluster_autoscaler.name
policy_arn = aws_iam_policy.cluster_autoscaler_policy.arn
}
//resource "helm_release" "cluster_autoscaler" {
// name = "cluster-autoscaler"
// chart = "common/charts/cluster-autoscaler"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "replicaCount"
// value = "{% if enable_cluster_autoscaler %}1{% else %}0{% endif %}"
// }
//
// set {
// name = "cloudProvider"
// value = "aws"
// }
//
// set {
// name = "autoDiscovery.clusterName"
// value = aws_eks_cluster.eks_cluster.name
// }
//
// set {
// name = "awsRegion"
// value = var.region
// }
//
// set {
// name = "awsAccessKeyID"
// value = aws_iam_access_key.iam_eks_cluster_autoscaler.id
// }
//
// set {
// name = "awsSecretAccessKey"
// value = aws_iam_access_key.iam_eks_cluster_autoscaler.secret
// }
//
// # This priority class is mandatory to ensure paused infra behaves properly on restore
// set {
// name = "priorityClassName"
// value = "system-cluster-critical"
// }
//
// # cluster autoscaler options
//
// set {
// name = "extraArgs.balance-similar-node-groups"
// value = "true"
// }
//
// set {
// name = "extraArgs.balance-similar-node-groups"
// value = "true"
// }
//
// # observability
// set {
// name = "serviceMonitor.enabled"
// value = var.metrics_history_enabled
// }
//
// set {
// name = "serviceMonitor.namespace"
// value = local.prometheus_namespace
// }
//
// # resources limitation
// set {
// name = "resources.limits.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "300Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "300Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_iam_user.iam_eks_cluster_autoscaler,
// aws_iam_access_key.iam_eks_cluster_autoscaler,
// aws_iam_user_policy_attachment.s3_cluster_autoscaler_attachment,
// aws_eks_cluster.eks_cluster,
// {% if metrics_history_enabled %}
// helm_release.prometheus_operator,
// {% endif %}
// helm_release.aws_vpc_cni,
// ]
//}
}

View File

@@ -1,61 +0,0 @@
//# On the first boot, it's required to remove the existing CoreDNS config to get it managed by helm
//resource "null_resource" "delete_aws_default_coredns_config" {
// provisioner "local-exec" {
// command = <<EOT
//kubectl -n kube-system delete configmap coredns
//EOT
// environment = {
// KUBECONFIG = local_file.kubeconfig.filename
// AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
// AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
// AWS_DEFAULT_REGION = "{{ aws_region }}"
// }
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// ]
//}
//
//resource "helm_release" "coredns-config" {
// name = "coredns-config"
// chart = "charts/coredns-config"
// namespace = "kube-system"
// atomic = true
// max_history = 50
// force_update = true
//
// set {
// name = "managed_dns"
// value = "{{ managed_dns_domains_terraform_format }}"
// }
//
// set {
// name = "managed_dns_resolvers"
// value = "{{ managed_dns_resolvers_terraform_format }}"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// provisioner "local-exec" {
// command = <<EOT
//kubectl -n kube-system rollout restart deployment coredns
//EOT
// environment = {
// KUBECONFIG = local_file.kubeconfig.filename
// AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
// AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
// AWS_DEFAULT_REGION = "{{ aws_region }}"
// }
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// null_resource.delete_aws_default_coredns_config
// ]
//}

View File

@@ -1,40 +0,0 @@
//resource "helm_release" "externaldns" {
// name = "externaldns"
// chart = "common/charts/external-dns"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// values = [file("chart_values/external-dns.yaml")]
//
// set {
// name = "resources.limits.cpu"
// value = "50m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "50m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "50Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "50Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.cluster_autoscaler,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -61,61 +61,4 @@ POLICY
resource "aws_iam_user_policy_attachment" "grafana_cloudwatch_attachment" {
user = aws_iam_user.iam_grafana_cloudwatch.name
policy_arn = aws_iam_policy.grafana_cloudwatch_policy.arn
}
//locals {
// cloudflare_datasources = <<DATASOURCES
//datasources:
// datasources.yaml:
// apiVersion: 1
// datasources:
// - name: Prometheus
// type: prometheus
// url: "http://prometheus-operator-prometheus:9090"
// access: proxy
// isDefault: true
// - name: PromLoki
// type: prometheus
// url: "http://${helm_release.loki.name}.${helm_release.loki.namespace}.svc:3100/loki"
// access: proxy
// isDefault: false
// - name: Loki
// type: loki
// url: "http://${helm_release.loki.name}.${helm_release.loki.namespace}.svc:3100"
// - name: Cloudwatch
// type: cloudwatch
// jsonData:
// authType: keys
// defaultRegion: ${var.region}
// secureJsonData:
// accessKey: '${aws_iam_access_key.iam_grafana_cloudwatch.id}'
// secretKey: '${aws_iam_access_key.iam_grafana_cloudwatch.secret}'
//DATASOURCES
//}
//
//resource "helm_release" "grafana" {
// name = "grafana"
// chart = "common/charts/grafana"
// namespace = "prometheus"
// atomic = true
// max_history = 50
//
// values = [
// file("chart_values/grafana.yaml"),
// local.cloudflare_datasources,
// ]
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.cluster_autoscaler,
// helm_release.aws_vpc_cni,
// {% if metrics_history_enabled %}
// helm_release.prometheus_operator,
// {% endif %}
// ]
//}
}

View File

@@ -64,76 +64,4 @@ resource "aws_s3_bucket" "loki_bucket" {
"Name" = "Applications logs"
}
)
}
//resource "helm_release" "loki" {
// name = "loki"
// chart = "common/charts/loki"
// namespace = "logging"
// create_namespace = true
// atomic = true
// max_history = 50
//
// values = [file("chart_values/loki.yaml")]
//
// set {
// name = "config.storage_config.aws.s3"
// value = "s3://${urlencode(aws_iam_access_key.iam_eks_loki.id)}:${urlencode(aws_iam_access_key.iam_eks_loki.secret)}@${var.region}/${aws_s3_bucket.loki_bucket.bucket}"
// }
//
// set {
// name = "config.storage_config.aws.region"
// value = var.region
// }
//
// set {
// name = "config.storage_config.aws.access_key_id"
// value = aws_iam_access_key.iam_eks_loki.id
// }
//
// set {
// name = "config.storage_config.aws.secret_access_key"
// value = aws_iam_access_key.iam_eks_loki.secret
// }
// set {
// name = "config.storage_config.aws.sse_encryption"
// value = "true"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "2Gi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "1Gi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_iam_user.iam_eks_loki,
// aws_iam_access_key.iam_eks_loki,
// aws_s3_bucket.loki_bucket,
// aws_iam_policy.loki_s3_policy,
// aws_iam_user_policy_attachment.s3_loki_attachment,
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// ]
//}
}
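The Loki storage line above URL-encodes the IAM key pair before embedding it in the s3:// connection string, because access keys and secrets can contain characters such as / or + that would otherwise break the URL. A minimal Rust sketch of the same construction, assuming the urlencoding crate (not a dependency of this repo; any percent-encoder works):

// Sketch only: mirrors Terraform's urlencode() interpolation above.
// `loki_s3_config` is a hypothetical helper name; `urlencoding` is an
// assumed dependency, not one declared in this repo's Cargo.toml.
fn loki_s3_config(access_key: &str, secret_key: &str, region: &str, bucket: &str) -> String {
    format!(
        "s3://{}:{}@{}/{}",
        urlencoding::encode(access_key),  // e.g. "AKIA..." passes through unchanged
        urlencoding::encode(secret_key),  // "/" and "+" become %2F and %2B
        region,
        bucket
    )
}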

View File

@@ -1,38 +0,0 @@
//resource "helm_release" "metrics_server" {
// name = "metrics-server"
// chart = "common/charts/metrics-server"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "resources.limits.cpu"
// value = "250m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "250m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "256Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "256Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// ]
//}

View File

@@ -24,70 +24,3 @@ resource "aws_iam_role_policy" "eks_cluster_ingress_loadbalancer_creation" {
}
POLICY
}
//
//resource "helm_release" "nginx_ingress" {
// name = "nginx-ingress"
// chart = "common/charts/nginx-ingress"
// namespace = "nginx-ingress"
// create_namespace = true
// atomic = true
// max_history = 50
//
// # Because of NLB, svc can take some time to start
// timeout = 300
// values = [file("chart_values/nginx-ingress.yaml")]
//
// # Controller resources
// set {
// name = "controller.resources.limits.cpu"
// value = "200m"
// }
//
// set {
// name = "controller.resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "controller.resources.limits.memory"
// value = "768Mi"
// }
//
// set {
// name = "controller.resources.requests.memory"
// value = "768Mi"
// }
//
// # Default backend resources
// set {
// name = "defaultBackend.resources.limits.cpu"
// value = "20m"
// }
//
// set {
// name = "defaultBackend.resources.requests.cpu"
// value = "10m"
// }
//
// set {
// name = "defaultBackend.resources.limits.memory"
// value = "32Mi"
// }
//
// set {
// name = "defaultBackend.resources.requests.memory"
// value = "32Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_iam_role_policy.eks_cluster_ingress_loadbalancer_creation,
// aws_eks_cluster.eks_cluster,
// helm_release.cluster_autoscaler,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -1,60 +0,0 @@
//resource "helm_release" "prometheus-adapter" {
// name = "prometheus-adapter"
// chart = "common/charts/prometheus-adapter"
// namespace = helm_release.prometheus_operator.namespace
// atomic = true
// max_history = 50
//
// set {
// name = "metricsRelistInterval"
// value = "30s"
// }
//
// set {
// name = "prometheus.url"
// value = "http://prometheus-operated.${helm_release.prometheus_operator.namespace}.svc"
// }
//
// # PDB
// set {
// name = "podDisruptionBudget.enabled"
// value = "true"
// }
//
// set {
// name = "podDisruptionBudget.maxUnavailable"
// value = "1"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "128Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "128Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.prometheus_operator,
// ]
//}

View File

@@ -1,131 +0,0 @@
//locals {
// prometheus_namespace = "prometheus"
//}
//
//resource "kubernetes_namespace" "prometheus_namespace" {
// metadata {
// name = local.prometheus_namespace
// }
//}
//
//resource "helm_release" "prometheus_operator" {
// name = "prometheus-operator"
// chart = "common/charts/prometheus-operator"
// namespace = local.prometheus_namespace
// // high timeout because on bootstrap it's one of the biggest dependencies, and an upgrade can take time
// // because of the CRDs and the number of elements it has to deploy
// timeout = 480
// atomic = true
// max_history = 50
//
// values = [file("chart_values/prometheus_operator.yaml")]
//
// // avoid a fake timestamp on CRD updates, as they take a long time to deploy and aren't needed unless updated regularly
//
// set {
// name = "nameOverride"
// value = "prometheus-operator"
// }
//
// set {
// name = "fullnameOverride"
// value = "prometheus-operator"
// }
//
// # Limits kube-state-metrics
// set {
// name = "kube-state-metrics.resources.limits.cpu"
// value = "100m"
// }
//
// set {
// name = "kube-state-metrics.resources.requests.cpu"
// value = "20m"
// }
//
// set {
// name = "kube-state-metrics.resources.limits.memory"
// value = "128Mi"
// }
//
// set {
// name = "kube-state-metrics.resources.requests.memory"
// value = "128Mi"
// }
//
// # Limits prometheus-node-exporter
// set {
// name = "prometheus-node-exporter.resources.limits.cpu"
// value = "20m"
// }
//
// set {
// name = "prometheus-node-exporter.resources.requests.cpu"
// value = "10m"
// }
//
// set {
// name = "prometheus-node-exporter.resources.limits.memory"
// value = "32Mi"
// }
//
// set {
// name = "prometheus-node-exporter.resources.requests.memory"
// value = "32Mi"
// }
//
// # Limits kube-state-metrics
// set {
// name = "kube-state-metrics.resources.limits.cpu"
// value = "30m"
// }
//
// set {
// name = "kube-state-metrics.resources.requests.cpu"
// value = "20m"
// }
//
// set {
// name = "kube-state-metrics.resources.limits.memory"
// value = "128Mi"
// }
//
// set {
// name = "kube-state-metrics.resources.requests.memory"
// value = "128Mi"
// }
//
// # Limits prometheusOperator
// set {
// name = "prometheusOperator.resources.limits.cpu"
// value = "1"
// }
//
// set {
// name = "prometheusOperator.resources.requests.cpu"
// value = "500m"
// }
//
// set {
// name = "prometheusOperator.resources.limits.memory"
// value = "1Gi"
// }
//
// set {
// name = "prometheusOperator.resources.requests.memory"
// value = "1Gi"
// }
//
//{% if test_cluster %}
// set {
// name = "defaultRules.config"
// value = "{}"
// }
//{% endif %}
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// kubernetes_namespace.prometheus_namespace,
// ]
//}

View File

@@ -1,50 +0,0 @@
//resource "helm_release" "promtail" {
// name = "promtail"
// chart = "common/charts/promtail"
// namespace = "kube-system"
// create_namespace = true
// atomic = true
// max_history = 50
//
// set {
// name = "loki.serviceName"
// value = "loki"
// }
//
// # this priority class is mandatory to ensure paused infra behaves properly on restore
// set {
// name = "priorityClassName"
// value = "system-node-critical"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "100m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "128Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "128Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -1,17 +0,0 @@
//resource "helm_release" "q_storageclass" {
// name = "q-storageclass"
// chart = "charts/q-storageclass"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// ]
//}

View File

@@ -1,110 +0,0 @@
data "external" "get_agent_version_to_use" {
program = ["./helper.sh", "get_agent_version_to_use", var.qovery_agent_info.token, var.qovery_agent_info.api_fqdn, var.kubernetes_cluster_id]
}
//
//resource "random_id" "qovery_agent_id" {
// keepers = {
// # Generate a new id each time we add a new Agent id
// agent_id = var.qovery_agent_replicas
// }
//
// byte_length = 16
//}
//
//resource "helm_release" "qovery_agent_resources" {
// name = "qovery-agent"
// chart = "common/charts/qovery-agent"
// namespace = "qovery"
// atomic = true
// create_namespace = true
// max_history = 50
// force_update = true
// recreate_pods = true
//
// set {
// name = "image.tag"
// value = data.external.get_agent_version_to_use.result.version
// }
//
// set {
// name = "replicaCount"
// value = random_id.qovery_agent_id.keepers.agent_id
// }
//
// set {
// name = "environmentVariables.AGENT_ID"
// value = random_id.qovery_agent_id.hex
// }
//
// set {
// name = "environmentVariables.NATS_HOST_URL"
// value = var.qovery_nats_url
// }
//
// set {
// name = "environmentVariables.NATS_USERNAME"
// value = var.qovery_nats_user
// }
//
// set {
// name = "environmentVariables.NATS_PASSWORD"
// value = var.qovery_nats_password
// }
//
// set {
// name = "environmentVariables.LOKI_URL"
// value = "http://loki.logging.svc.cluster.local:3100"
// }
//
// set {
// name = "environmentVariables.CLOUD_REGION"
// value = var.region
// }
//
// set {
// name = "environmentVariables.CLOUD_PROVIDER"
// value = var.cloud_provider
// }
//
// set {
// name = "environmentVariables.KUBERNETES_ID"
// value = var.kubernetes_cluster_id
// }
//
// set {
// name = "environmentVariables.RUST_LOG"
// value = "DEBUG"
// }
//
// # Limits
// set {
// name = "resources.limits.cpu"
// value = "1"
// }
//
// set {
// name = "resources.requests.cpu"
// value = "200m"
// }
//
// set {
// name = "resources.limits.memory"
// value = "500Mi"
// }
//
// set {
// name = "resources.requests.memory"
// value = "500Mi"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// ]
//}

View File

@@ -1,131 +0,0 @@
data "external" "get_engine_version_to_use" {
program = ["./helper.sh", "get_engine_version_to_use", var.qovery_engine_info.token, var.qovery_engine_info.api_fqdn, var.kubernetes_cluster_id]
}
//
//resource "helm_release" "qovery_engine_resources" {
// name = "qovery-engine"
// chart = "common/charts/qovery-engine"
// namespace = "qovery"
// atomic = true
// create_namespace = true
// max_history = 50
// timeout = 600
// recreate_pods = false
//
// // need kubernetes 1.18, should be well tested before activating it
// set {
// name = "autoscaler.enabled"
// value = "false"
// }
//
// set {
// name = "metrics.enabled"
// value = var.metrics_history_enabled
// }
//
// set {
// name = "volumes.storageClassName"
// value = "aws-ebs-gp2-0"
// }
//
// set {
// name = "image.tag"
// value = data.external.get_engine_version_to_use.result.version
// }
//
// set {
// name = "environmentVariables.QOVERY_NATS_URL"
// value = var.qovery_nats_url
// }
//
// set {
// name = "environmentVariables.QOVERY_NATS_USER"
// value = var.qovery_nats_user
// }
//
// set {
// name = "environmentVariables.QOVERY_NATS_PASSWORD"
// value = var.qovery_nats_password
// }
//
// set {
// name = "environmentVariables.ORGANIZATION"
// value = var.organization_id
// }
//
// set {
// name = "environmentVariables.CLOUD_PROVIDER"
// value = var.cloud_provider
// }
//
// set {
// name = "environmentVariables.REGION"
// value = var.region
// }
//
// set {
// name = "environmentVariables.LIB_ROOT_DIR"
// value = "/home/qovery/lib"
// }
//
// set {
// name = "environmentVariables.DOCKER_HOST"
// value = "tcp://0.0.0.0:2375"
// }
//
// # Engine Limits
// set {
// name = "engineResources.limits.cpu"
// value = "1"
// }
//
// set {
// name = "engineResources.requests.cpu"
// value = "500m"
// }
//
// set {
// name = "engineResources.limits.memory"
// value = "512Mi"
// }
//
// set {
// name = "engineResources.requests.memory"
// value = "512Mi"
// }
//
// # Build limits
// set {
// name = "buildResources.limits.cpu"
// value = "1"
// }
//
// set {
// name = "buildResources.requests.cpu"
// value = "500m"
// }
//
// set {
// name = "buildResources.limits.memory"
// value = "4Gi"
// }
//
// set {
// name = "buildResources.requests.memory"
// value = "4Gi"
// }
//
// set {
// name = "forced_upgrade"
// value = timestamp()
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// helm_release.aws_vpc_cni,
// helm_release.cluster_autoscaler,
// {% if metrics_history_enabled %}
// helm_release.prometheus-adapter,
// {% endif %}
// ]
//}

View File

@@ -31,84 +31,12 @@ function get_connection_details() { ## print environment variables to connect to
echo 'export KUBECONFIG={{ s3_kubeconfig_bucket }}/{{ kubernetes_cluster_id }}.yaml'
}
# Todo: do it engine-side once the terraform/helm split is done
function is_cni_old_installed_version() { ## Check if an old CNI version is already installed
export AWS_ACCESS_KEY_ID="{{ aws_access_key }}"
export AWS_SECRET_ACCESS_KEY="{{ aws_secret_key }}"
export AWS_DEFAULT_REGION="{{ aws_region }}"
export KUBECONFIG={{ s3_kubeconfig_bucket }}/{{ kubernetes_cluster_id }}.yaml
# shellcheck disable=SC2046
if [ $(kubectl -n kube-system get ds aws-node -o json | jq -c '.spec.selector.matchLabels' | grep -c '"k8s-app":"aws-node"') -eq 1 ] ; then
echo '{"is_cni_old_installed_version": "true"}'
else
echo '{"is_cni_old_installed_version": "false"}'
fi
exit 0
}
function enable_cni_managed_by_helm() { ## Annotate and label the existing CNI resources so they are managed by Helm
export AWS_ACCESS_KEY_ID="{{ aws_access_key }}"
export AWS_SECRET_ACCESS_KEY="{{ aws_secret_key }}"
export AWS_DEFAULT_REGION="{{ aws_region }}"
export KUBECONFIG={{ s3_kubeconfig_bucket }}/{{ kubernetes_cluster_id }}.yaml
set +e
# shellcheck disable=SC2046
if [ "$(kubectl -n kube-system get daemonset -l k8s-app=aws-node,app.kubernetes.io/managed-by=Helm 2>&1 | grep -ic 'No resources found')" == "0" ] ; then
exit 0
fi
for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do
echo "setting annotations and labels on $kind/aws-node"
kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=aws-vpc-cni
kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system
kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm
done
exit 0
}
function get_engine_version_to_use() { ## get the engine version for a given cluster. Args: token, api_fqdn, cluster_id
ENGINE_VERSION_CONTROLLER_TOKEN=$1
API_FQDN=$2
CLUSTER_ID=$3
API_URL="https://$API_FQDN/api/v1/engine-version"
curl -s -H "X-Qovery-Signature: $ENGINE_VERSION_CONTROLLER_TOKEN" "$API_URL?type=cluster&clusterId=$CLUSTER_ID" && exit 0
}
function get_agent_version_to_use() { ## get the agent version for a given cluster. Args: token, api_fqdn, cluster_id
AGENT_VERSION_CONTROLLER_TOKEN=$1
API_FQDN=$2
CLUSTER_ID=$3
API_URL="https://$API_FQDN/api/v1/agent-version"
curl -s -H "X-Qovery-Signature: $AGENT_VERSION_CONTROLLER_TOKEN" "$API_URL?type=cluster&clusterId=$CLUSTER_ID" && exit 0
}
case $1 in
get_engine_version_to_use)
check_args 3
get_engine_version_to_use "$2" "$3" "$4"
;;
get_agent_version_to_use)
check_args 3
get_agent_version_to_use "$2" "$3" "$4"
;;
get_connection_details)
get_connection_details
;;
is_cni_old_installed_version)
is_cni_old_installed_version
;;
enable_cni_managed_by_helm)
enable_cni_managed_by_helm
;;
*)
help
exit 1
;;
esac
# If ok return nothing
echo "{}"
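Per the todo above, these CNI checks move engine-side in this commit (see is_cni_old_installed_version called on AwsVpcCniChart in charts.rs below). A minimal standalone sketch of the same matchLabels probe, shelling out to kubectl the way this helper does; the function name and error handling here are illustrative, while the engine itself goes through its kubectl_exec_get_daemonset wrapper:

use std::process::Command;

// Sketch: returns true when the aws-node daemonset still carries the
// original (non-Helm) selector label, i.e. an old CNI install is present.
fn is_cni_old_installed_version(kubeconfig: &str) -> Result<bool, Box<dyn std::error::Error>> {
    let output = Command::new("kubectl")
        .env("KUBECONFIG", kubeconfig)
        .args(["-n", "kube-system", "get", "ds", "aws-node", "-o", "json"])
        .output()?;
    let ds: serde_json::Value = serde_json::from_slice(&output.stdout)?;
    Ok(ds["spec"]["selector"]["matchLabels"]["k8s-app"] == "aws-node")
}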

View File

@@ -1,35 +1,15 @@
locals {
qovery_tf_config = <<TF_CONFIG
{
"cloud_provider": "${var.cloud_provider}",
"region": "${var.region}",
"cluster_name": "${var.kubernetes_cluster_name}",
"cluster_id": "${var.kubernetes_cluster_id}",
"organization_id": "${var.organization_id}",
"test_cluster": "${var.test_cluster}",
"aws_access_key_id": "{{ aws_access_key }}",
"aws_secret_access_key": "{{ aws_secret_key }}",
"external_dns_provider": "{{ external_dns_provider }}",
"dns_email_report": "{{ dns_email_report }}",
"acme_server_url": "{{ acme_server_url }}",
"managed_dns_domains_terraform_format": "{{ managed_dns_domains_terraform_format }}",
"cloudflare_api_token": "{{ cloudflare_api_token }}",
"cloudflare_email": "{{ cloudflare_email }}",
"feature_flag_metrics_history": "{% if metrics_history_enabled %}true{% else %}false{% endif %}",
"aws_iam_eks_user_mapper_key": "${aws_iam_access_key.iam_eks_user_mapper.id}",
"aws_iam_eks_user_mapper_secret": "${aws_iam_access_key.iam_eks_user_mapper.secret}",
"aws_iam_cluster_autoscaler_key": "${aws_iam_access_key.iam_eks_cluster_autoscaler.id}",
"aws_iam_cluster_autoscaler_secret": "${aws_iam_access_key.iam_eks_cluster_autoscaler.secret}",
"managed_dns_resolvers_terraform_format": "{{ managed_dns_resolvers_terraform_format }}",
"feature_flag_log_history": "{% if log_history_enabled %}true{% else %}false{% endif %}",
"aws_iam_cloudwatch_key": "${aws_iam_access_key.iam_grafana_cloudwatch.id}",
"aws_iam_cloudwatch_secret": "${aws_iam_access_key.iam_grafana_cloudwatch.secret}",
"loki_storage_config_aws_s3": "s3://${urlencode(aws_iam_access_key.iam_eks_loki.id)}:${urlencode(aws_iam_access_key.iam_eks_loki.secret)}@${var.region}/${aws_s3_bucket.loki_bucket.bucket}",
"aws_iam_loki_storage_key": "${aws_iam_access_key.iam_eks_loki.id}",
"aws_iam_loki_storage_secret": "${aws_iam_access_key.iam_eks_loki.secret}",
"qovery_agent_version": "${data.external.get_agent_version_to_use.result.version}",
"qovery_engine_version": "${data.external.get_agent_version_to_use.result.version}",
"nats_host_url": "${var.qovery_nats_url}",
"nats_username": "${var.qovery_nats_user}",
"nats_password": "${var.qovery_nats_password}"
"aws_iam_loki_storage_secret": "${aws_iam_access_key.iam_eks_loki.secret}"
}
TF_CONFIG
}
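After this trim, the generated TF_CONFIG only carries the cluster identity fields plus the Loki storage credentials; everything else now flows to the charts through ChartsConfigPrerequisites in Rust. A minimal sketch of a struct matching the fields that appear to survive the trim, reusing the serde/serde_derive/serde_json crates already declared in Cargo.toml (the struct and function names are illustrative; the engine's real counterpart is AwsQoveryTerraformConfig in charts.rs below):

use serde_derive::Deserialize;
use std::{fs::File, io::BufReader};

// Illustrative mirror of the trimmed TF_CONFIG payload above.
#[derive(Debug, Deserialize)]
struct TrimmedTerraformConfig {
    cloud_provider: String,
    region: String,
    cluster_name: String,
    cluster_id: String,
    organization_id: String,
    test_cluster: String,
    loki_storage_config_aws_s3: String,
    aws_iam_loki_storage_key: String,
    aws_iam_loki_storage_secret: String,
}

// Same read pattern aws_helm_charts() applies to qovery_terraform_config_file.
fn read_trimmed_config(path: &str) -> Result<TrimmedTerraformConfig, Box<dyn std::error::Error>> {
    let reader = BufReader::new(File::open(path)?);
    Ok(serde_json::from_reader(reader)?)
}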

View File

@@ -86,14 +86,14 @@ variable "eks_k8s_versions" {
}
variable "kubernetes_cluster_id" {
description = "Kubernetes cluster name"
description = "Kubernetes cluster id"
default = "{{ kubernetes_cluster_id }}"
type = string
}
variable "kubernetes_cluster_name" {
description = "Kubernetes cluster name"
default = "qovery-{{ kubernetes_cluster_name }}"
default = "qovery-{{ kubernetes_cluster_id }}"
type = string
}
@@ -288,13 +288,6 @@ variable "metrics_history_enabled" {
type = bool
}
# Force helm upgrade
variable "forced_upgrade" {
description = "Force upgrade"
default = {% if force_upgrade %}timestamp(){% else %}"false"{% endif %}
type = string
}
{%- if resource_expiration_in_seconds is defined %}
# Pleco ttl
variable "resource_expiration_in_seconds" {

View File

@@ -16,6 +16,7 @@ spec:
metadata:
{{- with .Values.podAnnotations }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:

View File

@@ -1,6 +1,9 @@
use crate::cloud_provider::aws::kubernetes::InfraOptions;
use crate::cloud_provider::helm::{
get_chart_namespace, ChartInfo, ChartSetValue, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces,
get_chart_namespace, ChartInfo, ChartSetValue, ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmChart,
HelmChartNamespaces,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::cmd::kubectl::{kubectl_exec_get_daemonset, kubectl_exec_with_output};
use crate::error::{SimpleError, SimpleErrorKind};
use serde::{Deserialize, Serialize};
@@ -12,51 +15,51 @@ use std::time::Duration;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AwsQoveryTerraformConfig {
pub cloud_provider: String,
pub region: String,
pub cluster_name: String,
pub cluster_id: String,
pub organization_id: String,
pub test_cluster: String,
pub aws_access_key_id: String,
pub aws_secret_access_key: String,
// feature flags
pub feature_flag_metrics_history: String,
pub feature_flag_log_history: String,
// nats
pub nats_host_url: String,
pub nats_username: String,
pub nats_password: String,
pub aws_iam_eks_user_mapper_key: String,
pub aws_iam_eks_user_mapper_secret: String,
pub aws_iam_cluster_autoscaler_key: String,
pub aws_iam_cluster_autoscaler_secret: String,
// dns
pub managed_dns_resolvers_terraform_format: String,
pub external_dns_provider: String,
pub dns_email_report: String,
pub cloudflare_api_token: String,
pub cloudflare_email: String,
// tls
pub acme_server_url: String,
pub managed_dns_domains_terraform_format: String,
// logs
pub aws_iam_cloudwatch_key: String,
pub aws_iam_cloudwatch_secret: String,
pub loki_storage_config_aws_s3: String,
pub aws_iam_loki_storage_key: String,
pub aws_iam_loki_storage_secret: String,
// qovery
pub qovery_agent_version: String,
pub qovery_engine_version: String,
}
pub struct ChartsConfigPrerequisites {
pub organization_id: String,
pub cluster_id: String,
pub region: String,
pub cluster_name: String,
pub cloud_provider: String,
pub test_cluster: bool,
pub aws_access_key_id: String,
pub aws_secret_access_key: String,
pub ff_log_history_enabled: bool,
pub ff_metrics_history_enabled: bool,
pub managed_dns_helm_format: String,
pub managed_dns_resolvers_terraform_format: String,
pub external_dns_provider: String,
pub dns_email_report: String,
pub acme_url: String,
pub cloudflare_email: String,
pub cloudflare_api_token: String,
// qovery options form json input
pub infra_options: InfraOptions,
}
pub fn aws_helm_charts(
qovery_terraform_config_file: &str,
chart_config_prerequisites: &ChartsConfigPrerequisites,
chart_prefix_path: Option<&str>,
kubernetes_config: &Path,
envs: &[(String, String)],
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
let chart_prefix = match chart_prefix_path {
None => "./",
Some(x) => x,
};
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
let content_file = File::open(&qovery_terraform_config_file)?;
let reader = BufReader::new(content_file);
let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) {
@@ -75,25 +78,25 @@ pub fn aws_helm_charts(
let prometheus_namespace = HelmChartNamespaces::Prometheus;
let loki_namespace = HelmChartNamespaces::Logging;
let loki_service_name = "loki".to_string();
let loki_kube_dns_prefix = format!("loki.{}.svc", get_chart_namespace(loki_namespace));
// Qovery storage class
let q_storage_class = CommonChart {
chart_info: ChartInfo {
name: "q-storageclass".to_string(),
path: format!("{}/charts/q-storageclass", &chart_prefix),
path: chart_path("/charts/q-storageclass"),
..Default::default()
},
};
let aws_vpc_cni_chart = AwsVpcCniChart {
let mut aws_vpc_cni_chart = AwsVpcCniChart {
chart_info: ChartInfo {
name: "aws-node".to_string(),
path: format!("{}/charts/aws-vpc-cni", &chart_prefix),
name: "aws-vpc-cni".to_string(),
path: chart_path("charts/aws-vpc-cni"),
values: vec![
ChartSetValue {
key: "image.region".to_string(),
value: qovery_terraform_config.region.clone(),
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "image.pullPolicy".to_string(),
@@ -103,35 +106,47 @@ pub fn aws_helm_charts(
key: "crd.create".to_string(),
value: "false".to_string(),
},
// label ENIs
ChartSetValue {
key: "env.CLUSTER_NAME".to_string(),
value: qovery_terraform_config.cluster_name.clone(),
value: chart_config_prerequisites.cluster_name.clone(),
},
// number of total IP addresses that the daemon should attempt to allocate for pod assignment on the node (init phase)
ChartSetValue {
key: "env.MINIMUM_IP_TARGET".to_string(),
value: "60".to_string(),
},
// number of free IP addresses that the daemon should attempt to keep available for pod assignment on the node
ChartSetValue {
key: "env.WARM_IP_TARGET".to_string(),
value: "10".to_string(),
},
// maximum number of ENIs that will be attached to the node (k8s recommend to avoid going over 100)
ChartSetValue {
key: "env.MAX_ENI".to_string(),
value: "100".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "50".to_string(),
value: "50m".to_string(),
},
],
..Default::default()
},
};
let is_cni_old_installed_version = match aws_vpc_cni_chart.is_cni_old_installed_version(kubernetes_config, &envs) {
Ok(x) => x,
Err(e) => return Err(e),
};
aws_vpc_cni_chart.chart_info.values.push(ChartSetValue {
key: "originalMatchLabels".to_string(),
value: is_cni_old_installed_version.to_string(),
});
let aws_iam_eks_user_mapper = CommonChart {
chart_info: ChartInfo {
name: "iam-eks-user-mapper".to_string(),
path: "charts/iam-eks-user-mapper".to_string(),
path: chart_path("charts/iam-eks-user-mapper"),
values: vec![
ChartSetValue {
key: "aws.accessKey".to_string(),
@@ -143,7 +158,7 @@ pub fn aws_helm_charts(
},
ChartSetValue {
key: "image.region".to_string(),
value: qovery_terraform_config.region.clone(),
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "syncIamGroup".to_string(),
@@ -174,7 +189,7 @@ pub fn aws_helm_charts(
let aws_node_term_handler = CommonChart {
chart_info: ChartInfo {
name: "aws-node-term-handler".to_string(),
path: "charts/aws-node-termination-handler".to_string(),
path: chart_path("charts/aws-node-termination-handler"),
values: vec![
ChartSetValue {
key: "nameOverride".to_string(),
@@ -217,7 +232,7 @@ pub fn aws_helm_charts(
let aws_calico = CommonChart {
chart_info: ChartInfo {
name: "calico".to_string(),
path: "charts/aws-calico".to_string(),
path: chart_path("charts/aws-calico"),
..Default::default()
},
};
@@ -225,19 +240,19 @@ pub fn aws_helm_charts(
let cluster_autoscaler = CommonChart {
chart_info: ChartInfo {
name: "cluster-autoscaler".to_string(),
path: "common/charts/cluster-autoscaler".to_string(),
path: chart_path("common/charts/cluster-autoscaler"),
values: vec![
ChartSetValue {
key: "cloudProvider".to_string(),
value: "aws".to_string(),
value: chart_config_prerequisites.cloud_provider.clone(),
},
ChartSetValue {
key: "awsRegion".to_string(),
value: qovery_terraform_config.region.clone(),
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "autoDiscovery.clusterName".to_string(),
value: qovery_terraform_config.cluster_name.clone(),
value: chart_config_prerequisites.cluster_name.clone(),
},
ChartSetValue {
key: "awsAccessKeyID".to_string(),
@@ -260,7 +275,7 @@ pub fn aws_helm_charts(
// observability
ChartSetValue {
key: "serviceMonitor.enabled".to_string(),
value: qovery_terraform_config.feature_flag_metrics_history.clone(),
value: chart_config_prerequisites.ff_metrics_history_enabled.to_string(),
},
ChartSetValue {
key: "serviceMonitor.namespace".to_string(),
@@ -291,15 +306,17 @@ pub fn aws_helm_charts(
let coredns_config = CoreDNSConfigChart {
chart_info: ChartInfo {
name: "coredns".to_string(),
path: format!("{}/charts/coredns-config", &chart_prefix),
path: chart_path("/charts/coredns-config"),
values: vec![
ChartSetValue {
key: "managed_dns".to_string(),
value: qovery_terraform_config.managed_dns_resolvers_terraform_format.clone(),
value: chart_config_prerequisites.managed_dns_helm_format.clone(),
},
ChartSetValue {
key: "managed_dns_resolvers".to_string(),
value: qovery_terraform_config.managed_dns_resolvers_terraform_format,
value: chart_config_prerequisites
.managed_dns_resolvers_terraform_format
.clone(),
},
],
..Default::default()
@@ -309,8 +326,8 @@ pub fn aws_helm_charts(
let external_dns = CommonChart {
chart_info: ChartInfo {
name: "externaldns".to_string(),
path: "common/charts/external-dns".to_string(),
values_files: vec!["chart_values/external-dns.yaml".to_string()],
path: chart_path("common/charts/external-dns"),
values_files: vec![chart_path("chart_values/external-dns.yaml")],
values: vec![
// resources limits
ChartSetValue {
@@ -337,12 +354,13 @@ pub fn aws_helm_charts(
let promtail = CommonChart {
chart_info: ChartInfo {
name: "promtail".to_string(),
path: "common/charts/promtail".to_string(),
namespace: loki_namespace,
path: chart_path("common/charts/promtail"),
// because of priorityClassName, we need to add it to kube-system
namespace: HelmChartNamespaces::KubeSystem,
values: vec![
ChartSetValue {
key: "loki.serviceName".to_string(),
value: loki_service_name.clone(),
value: loki_kube_dns_prefix.clone(),
},
// this priority class is mandatory to ensure paused infra behaves properly on restore
ChartSetValue {
@@ -374,9 +392,9 @@ pub fn aws_helm_charts(
let loki = CommonChart {
chart_info: ChartInfo {
name: "loki".to_string(),
path: "common/charts/loki".to_string(),
path: chart_path("common/charts/loki"),
namespace: loki_namespace,
values_files: vec!["chart_values/loki.yaml".to_string()],
values_files: vec![chart_path("chart_values/loki.yaml")],
values: vec![
ChartSetValue {
key: "config.storage_config.aws.s3".to_string(),
@@ -384,7 +402,7 @@ pub fn aws_helm_charts(
},
ChartSetValue {
key: "config.storage_config.aws.region".to_string(),
value: qovery_terraform_config.region.clone(),
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "aws_iam_loki_storage_key".to_string(),
@@ -423,12 +441,12 @@ pub fn aws_helm_charts(
let mut prometheus_operator = CommonChart {
chart_info: ChartInfo {
name: "prometheus-operator".to_string(),
path: "common/charts/prometheus-operator".to_string(),
namespace: HelmChartNamespaces::Logging,
path: chart_path("/common/charts/prometheus-operator"),
namespace: prometheus_namespace,
// high timeout because on bootstrap it's one of the biggest dependencies, and an upgrade can take time
// because of the CRDs and the number of elements it has to deploy
timeout: "480".to_string(),
values_files: vec!["chart_values/prometheus_operator.yaml".to_string()],
values_files: vec![chart_path("chart_values/prometheus_operator.yaml")],
values: vec![
ChartSetValue {
key: "nameOverride".to_string(),
@@ -510,7 +528,7 @@ pub fn aws_helm_charts(
..Default::default()
},
};
if &qovery_terraform_config.test_cluster == "true" {
if chart_config_prerequisites.test_cluster {
prometheus_operator.chart_info.values.push(ChartSetValue {
key: "defaultRules.config".to_string(),
value: "{}".to_string(),
@@ -520,8 +538,8 @@ pub fn aws_helm_charts(
let prometheus_adapter = CommonChart {
chart_info: ChartInfo {
name: "prometheus-adapter".to_string(),
path: "common/charts/prometheus-adapter".to_string(),
namespace: HelmChartNamespaces::Logging,
path: chart_path("common/charts/prometheus-adapter"),
namespace: prometheus_namespace,
values: vec![
ChartSetValue {
key: "metricsRelistInterval".to_string(),
@@ -567,7 +585,7 @@ pub fn aws_helm_charts(
let metric_server = CommonChart {
chart_info: ChartInfo {
name: "metrics-server".to_string(),
path: "common/charts/metrics-server".to_string(),
path: chart_path("common/charts/metrics-server"),
values: vec![
ChartSetValue {
key: "resources.limits.cpu".to_string(),
@@ -590,13 +608,84 @@ pub fn aws_helm_charts(
},
};
// todo: add custom datasource to values_file
let kube_state_metrics = CommonChart {
chart_info: ChartInfo {
name: "kube-state-metrics".to_string(),
namespace: HelmChartNamespaces::Prometheus,
path: chart_path("common/charts/kube-state-metrics"),
values: vec![
ChartSetValue {
key: "prometheus.monitor.enabled".to_string(),
value: "true".to_string(),
},
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "75m".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "75m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "128Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "128Mi".to_string(),
},
],
..Default::default()
},
};
let grafana_datasources = format!(
"
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: \"http://prometheus-operator-prometheus:9090\"
access: proxy
isDefault: true
- name: PromLoki
type: prometheus
url: \"http://{}.{}.svc:3100/loki\"
access: proxy
isDefault: false
- name: Loki
type: loki
url: \"http://{}.{}.svc:3100\"
- name: Cloudwatch
type: cloudwatch
jsonData:
authType: keys
defaultRegion: {}
secureJsonData:
accessKey: '{}'
secretKey: '{}'
",
&loki.chart_info.name,
get_chart_namespace(loki_namespace),
&loki.chart_info.name,
get_chart_namespace(loki_namespace),
chart_config_prerequisites.region,
qovery_terraform_config.aws_iam_cloudwatch_key,
qovery_terraform_config.aws_iam_cloudwatch_secret,
);
let grafana = CommonChart {
chart_info: ChartInfo {
name: "grafana".to_string(),
path: "common/charts/grafana".to_string(),
path: chart_path("common/charts/grafana"),
namespace: prometheus_namespace,
values_files: vec!["chart_values/grafana.yaml".to_string()],
values_files: vec![chart_path("chart_values/grafana.yaml")],
yaml_files_content: vec![ChartValuesGenerated {
filename: "grafana_generated.yaml".to_string(),
yaml_content: grafana_datasources,
}],
..Default::default()
},
};
@@ -604,9 +693,9 @@ pub fn aws_helm_charts(
let cert_manager = CommonChart {
chart_info: ChartInfo {
name: "cert-manager".to_string(),
path: "common/charts/cert-manager".to_string(),
path: chart_path("common/charts/cert-manager"),
namespace: HelmChartNamespaces::CertManager,
values_files: vec!["chart_values/cert-manager.yaml".to_string()],
values_files: vec![chart_path("chart_values/cert-manager.yaml")],
values: vec![
ChartSetValue {
key: "installCRDs".to_string(),
@@ -624,7 +713,7 @@ pub fn aws_helm_charts(
},
ChartSetValue {
key: "prometheus.servicemonitor.enabled".to_string(),
value: qovery_terraform_config.feature_flag_metrics_history.clone(),
value: chart_config_prerequisites.ff_metrics_history_enabled.to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.prometheusInstance".to_string(),
@@ -689,48 +778,48 @@ pub fn aws_helm_charts(
let mut cert_manager_config = CommonChart {
chart_info: ChartInfo {
name: "cert-manager-configs".to_string(),
path: "common/charts/cert-manager-configs".to_string(),
path: chart_path("common/charts/cert-manager-configs"),
namespace: HelmChartNamespaces::CertManager,
values: vec![
ChartSetValue {
key: "externalDnsProvider".to_string(),
value: qovery_terraform_config.external_dns_provider.clone(),
value: chart_config_prerequisites.external_dns_provider.clone(),
},
ChartSetValue {
key: "acme.letsEncrypt.emailReport".to_string(),
value: qovery_terraform_config.dns_email_report,
value: chart_config_prerequisites.dns_email_report.clone(),
},
ChartSetValue {
key: "acme.letsEncrypt.acmeUrl".to_string(),
value: qovery_terraform_config.acme_server_url,
value: chart_config_prerequisites.acme_url.clone(),
},
ChartSetValue {
key: "managedDns".to_string(),
value: qovery_terraform_config.managed_dns_domains_terraform_format,
value: chart_config_prerequisites.managed_dns_helm_format.clone(),
},
],
..Default::default()
},
};
if &qovery_terraform_config.external_dns_provider == "cloudflare" {
if chart_config_prerequisites.external_dns_provider == "cloudflare" {
cert_manager_config.chart_info.values.push(ChartSetValue {
key: "cloudflare_api_token".to_string(),
value: qovery_terraform_config.cloudflare_api_token,
key: "provider.cloudflare.apiToken".to_string(),
value: chart_config_prerequisites.cloudflare_api_token.clone(),
});
cert_manager_config.chart_info.values.push(ChartSetValue {
key: "cloudflare_email".to_string(),
value: qovery_terraform_config.cloudflare_email,
key: "provider.cloudflare.email".to_string(),
value: chart_config_prerequisites.cloudflare_email.clone(),
})
}
let nginx_ingress = CommonChart {
chart_info: ChartInfo {
name: "nginx-ingress".to_string(),
path: "common/charts/nginx-ingress".to_string(),
path: chart_path("common/charts/nginx-ingress"),
namespace: HelmChartNamespaces::NginxIngress,
// Because of NLB, svc can take some time to start
timeout: "300".to_string(),
values_files: vec!["chart_values/nginx-ingress.yaml".to_string()],
values_files: vec![chart_path("chart_values/nginx-ingress.yaml")],
values: vec![
// Controller resources limits
ChartSetValue {
@@ -771,19 +860,19 @@ pub fn aws_helm_charts(
},
};
// todo: add missing parameters
let pleco = CommonChart {
chart_info: ChartInfo {
name: "pleco".to_string(),
path: "common/charts/pleco".to_string(),
path: chart_path("common/charts/pleco"),
values_files: vec![chart_path("chart_values/pleco.yaml")],
values: vec![
ChartSetValue {
key: "environmentVariables.AWS_ACCESS_KEY_ID".to_string(),
value: qovery_terraform_config.aws_access_key_id,
value: chart_config_prerequisites.aws_access_key_id.clone(),
},
ChartSetValue {
key: "environmentVariables.AWS_SECRET_ACCESS_KEY".to_string(),
value: qovery_terraform_config.aws_secret_access_key,
value: chart_config_prerequisites.aws_secret_access_key.clone(),
},
ChartSetValue {
key: "environmentVariables.LOG_LEVEL".to_string(),
@@ -794,16 +883,29 @@ pub fn aws_helm_charts(
},
};
let qovery_agent_version: QoveryAgent = match get_qovery_app_version(
QoveryAppName::Agent,
&chart_config_prerequisites.infra_options.agent_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("Qovery agent version couldn't be retrieved. {}", e)),
})
}
};
let qovery_agent = CommonChart {
chart_info: ChartInfo {
name: "qovery-agent".to_string(),
path: "common/charts/qovery-agent".to_string(),
path: chart_path("common/charts/qovery-agent"),
namespace: HelmChartNamespaces::Qovery,
values: vec![
// todo: directly get version from the engine, not from terraform helper
ChartSetValue {
key: "image.tag".to_string(),
value: qovery_terraform_config.qovery_agent_version,
value: qovery_agent_version.version,
},
ChartSetValue {
key: "replicaCount".to_string(),
@@ -811,35 +913,34 @@ pub fn aws_helm_charts(
},
ChartSetValue {
key: "environmentVariables.NATS_HOST_URL".to_string(),
value: qovery_terraform_config.nats_host_url.clone(),
value: chart_config_prerequisites.infra_options.qovery_nats_url.to_string(),
},
ChartSetValue {
key: "environmentVariables.NATS_USERNAME".to_string(),
value: qovery_terraform_config.nats_username.clone(),
value: chart_config_prerequisites.infra_options.qovery_nats_user.to_string(),
},
ChartSetValue {
key: "environmentVariables.NATS_PASSWORD".to_string(),
value: qovery_terraform_config.nats_password.clone(),
value: chart_config_prerequisites
.infra_options
.qovery_nats_password
.to_string(),
},
ChartSetValue {
key: "environmentVariables.LOKI_URL".to_string(),
value: format!(
"http://{}.{}.svc.cluster.local:3100",
loki_service_name,
get_chart_namespace(loki_namespace)
),
value: format!("http://{}.cluster.local:3100", loki_kube_dns_prefix),
},
ChartSetValue {
key: "environmentVariables.CLOUD_REGION".to_string(),
value: qovery_terraform_config.region.clone(),
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "environmentVariables.CLOUD_PROVIDER".to_string(),
value: qovery_terraform_config.cloud_provider.clone(),
value: chart_config_prerequisites.cloud_provider.clone(),
},
ChartSetValue {
key: "environmentVariables.KUBERNETES_ID".to_string(),
value: qovery_terraform_config.cluster_id,
value: chart_config_prerequisites.cluster_id.clone(),
},
// resources limits
ChartSetValue {
@@ -863,16 +964,29 @@ pub fn aws_helm_charts(
},
};
let qovery_engine_version: QoveryEngine = match get_qovery_app_version(
QoveryAppName::Engine,
&chart_config_prerequisites.infra_options.engine_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("Qovery engine version couldn't be retrieved. {}", e)),
})
}
};
let qovery_engine = CommonChart {
chart_info: ChartInfo {
name: "qovery-engine".to_string(),
path: "common/charts/qovery-engine".to_string(),
path: chart_path("common/charts/qovery-engine"),
namespace: HelmChartNamespaces::Qovery,
values: vec![
// todo: directly get version from the engine, not from terraform
ChartSetValue {
key: "image.tag".to_string(),
value: qovery_terraform_config.qovery_engine_version,
value: qovery_engine_version.version,
},
// needs kubernetes 1.18; should be well tested before activating it
ChartSetValue {
@@ -888,28 +1002,31 @@ pub fn aws_helm_charts(
value: "aws-ebs-gp2-0".to_string(),
},
ChartSetValue {
key: "environmentVariables.NATS_HOST_URL".to_string(),
value: qovery_terraform_config.nats_host_url,
key: "environmentVariables.QOVERY_NATS_URL".to_string(),
value: chart_config_prerequisites.infra_options.qovery_nats_url.to_string(),
},
ChartSetValue {
key: "environmentVariables.NATS_USERNAME".to_string(),
value: qovery_terraform_config.nats_username,
key: "environmentVariables.QOVERY_NATS_USER".to_string(),
value: chart_config_prerequisites.infra_options.qovery_nats_user.to_string(),
},
ChartSetValue {
key: "environmentVariables.NATS_PASSWORD".to_string(),
value: qovery_terraform_config.nats_password,
key: "environmentVariables.QOVERY_NATS_PASSWORD".to_string(),
value: chart_config_prerequisites
.infra_options
.qovery_nats_password
.to_string(),
},
ChartSetValue {
key: "environmentVariables.ORGANIZATION".to_string(),
value: qovery_terraform_config.organization_id,
value: chart_config_prerequisites.organization_id.clone(),
},
ChartSetValue {
key: "environmentVariables.CLOUD_PROVIDER".to_string(),
value: qovery_terraform_config.cloud_provider,
value: chart_config_prerequisites.cloud_provider.clone(),
},
ChartSetValue {
key: "environmentVariables.REGION".to_string(),
value: qovery_terraform_config.region,
value: chart_config_prerequisites.region.clone(),
},
ChartSetValue {
key: "environmentVariables.LIB_ROOT_DIR".to_string(),
@@ -921,36 +1038,36 @@ pub fn aws_helm_charts(
},
// engine resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
key: "engineResources.limits.cpu".to_string(),
value: "1".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
key: "engineResources.requests.cpu".to_string(),
value: "500m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
key: "engineResources.limits.memory".to_string(),
value: "512Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
key: "engineResources.requests.memory".to_string(),
value: "512Mi".to_string(),
},
// build resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
key: "buildResources.limits.cpu".to_string(),
value: "1".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
key: "buildResources.requests.cpu".to_string(),
value: "500m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
key: "buildResources.limits.memory".to_string(),
value: "4Gi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
key: "buildResources.requests.memory".to_string(),
value: "4Gi".to_string(),
},
],
@@ -968,41 +1085,38 @@ pub fn aws_helm_charts(
let mut level_2: Vec<Box<dyn HelmChart>> = vec![];
let mut level_3: Vec<Box<dyn HelmChart>> = vec![
// Box::new(cluster_autoscaler),
// Box::new(aws_iam_eks_user_mapper),
// Box::new(aws_calico),
Box::new(cluster_autoscaler),
Box::new(aws_iam_eks_user_mapper),
Box::new(aws_calico),
];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![
// Box::new(metric_server),
// Box::new(aws_node_term_handler),
// Box::new(external_dns),
Box::new(metric_server),
Box::new(aws_node_term_handler),
Box::new(external_dns),
];
let level_5: Vec<Box<dyn HelmChart>> = vec![
// Box::new(nginx_ingress), Box::new(cert_manager), Box::new(pleco)
];
let level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress), Box::new(cert_manager), Box::new(pleco)];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![
// Box::new(cert_manager_config),
// Box::new(qovery_agent),
// Box::new(qovery_engine),
Box::new(cert_manager_config),
Box::new(qovery_agent),
Box::new(qovery_engine),
];
// if &qovery_terraform_config.feature_flag_metrics_history == "true" {
// level_2.push(Box::new(prometheus_operator));
// level_4.push(Box::new(prometheus_adapter));
// }
// if &qovery_terraform_config.feature_flag_log_history == "true" {
// level_3.push(Box::new(promtail));
// level_4.push(Box::new(loki));
// }
//
// if &qovery_terraform_config.feature_flag_metrics_history == "true"
// || &qovery_terraform_config.feature_flag_log_history == "true"
// {
// level_6.push(Box::new(grafana))
// };
if chart_config_prerequisites.ff_metrics_history_enabled {
level_2.push(Box::new(prometheus_operator));
level_4.push(Box::new(prometheus_adapter));
level_4.push(Box::new(kube_state_metrics));
}
if chart_config_prerequisites.ff_log_history_enabled {
level_3.push(Box::new(promtail));
level_4.push(Box::new(loki));
}
if chart_config_prerequisites.ff_metrics_history_enabled || chart_config_prerequisites.ff_log_history_enabled {
level_6.push(Box::new(grafana))
};
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6])
}
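// The six levels returned here are consumed sequentially: every chart in level N is
// deployed before level N+1 starts, which is why nginx-ingress/cert-manager/pleco sit
// in level 5 and cert-manager-configs/qovery-agent/qovery-engine in level 6. A minimal
// sketch of such a consumer, assuming a fallible `run` method on the HelmChart trait
// (the real method name and signature may differ):
fn deploy_levels(
    levels: Vec<Vec<Box<dyn HelmChart>>>,
    kubernetes_config: &Path,
    envs: &[(String, String)],
) -> Result<(), SimpleError> {
    for (i, level) in levels.into_iter().enumerate() {
        info!("deploying helm charts, level {}", i + 1);
        for chart in level {
            // fail fast: charts in later levels depend on earlier ones
            chart.run(kubernetes_config, envs)?;
        }
    }
    Ok(())
}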
@@ -1030,53 +1144,66 @@ impl HelmChart for AwsVpcCniChart {
for kind in kinds {
info!("setting annotations and labels on {}/aws-node", &kind);
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
environment_variables.clone(),
|_| {},
|_| {},
);
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
"meta.helm.sh/release-namespace=kube-system",
],
environment_variables.clone(),
|_| {},
|_| {},
);
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
kind,
"aws-node",
"app.kubernetes.io/managed-by=Helm",
],
environment_variables.clone(),
|_| {},
|_| {},
);
let steps = || -> Result<(), SimpleError> {
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
"meta.helm.sh/release-namespace=kube-system",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
kind,
"aws-node",
"app.kubernetes.io/managed-by=Helm",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
Ok(())
};
if let Err(e) = steps() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while adding annotations for AWS VPC CNI. {:?}",
e.message
)),
});
}
}
info!("AWS CNI successfully deployed");
// sleep in order to be sure the daemonset is updated
sleep(Duration::from_secs(20))
sleep(Duration::from_secs(30))
}
false => info!("AWS CNI is already supported by Helm, nothing to do"),
};
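// Background on the annotate/label triplet above: Helm 3 only adopts a pre-existing
// resource (here the aws-node daemonset pre-installed by EKS) if it carries the
// release-name/release-namespace annotations and the managed-by label. The three
// kubectl_exec_with_output calls are equivalent to:
//   kubectl -n kube-system annotate --overwrite <kind> aws-node \
//       meta.helm.sh/release-name=<release-name>
//   kubectl -n kube-system annotate --overwrite <kind> aws-node \
//       meta.helm.sh/release-namespace=kube-system
//   kubectl -n kube-system label --overwrite <kind> aws-node \
//       app.kubernetes.io/managed-by=Helm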
@@ -1086,6 +1213,51 @@ impl HelmChart for AwsVpcCniChart {
}
impl AwsVpcCniChart {
// this is required to know whether we need to keep the old annotation/label values or not
fn is_cni_old_installed_version(
&self,
kubernetes_config: &Path,
envs: &[(String, String)],
) -> Result<bool, SimpleError> {
let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
match kubectl_exec_get_daemonset(
kubernetes_config,
"aws-node",
self.namespace().as_str(),
None,
environment_variables,
) {
Ok(x) => {
if x.spec.is_none() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"spec was not found in json output while looking at daemonset {}",
&self.chart_info.name
)),
});
}
match x.spec.unwrap().selector.match_labels.k8s_app {
Some(x) if x == "aws-node" => Ok(true),
_ => Ok(false),
}
}
Err(e) => {
let msg = format!(
"error while getting daemonset info for chart {}, won't deploy CNI chart. {:?}",
&self.chart_info.name, e
);
error!("{}", &msg);
Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
})
}
}
}
fn enable_cni_managed_by_helm(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> bool {
let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
@@ -1097,7 +1269,7 @@ impl AwsVpcCniChart {
environment_variables,
) {
Ok(x) => {
if x.items.is_empty() {
if x.items.is_some() && x.items.unwrap().is_empty() {
true
} else {
false
@@ -1105,7 +1277,7 @@ impl AwsVpcCniChart {
}
Err(e) => {
error!(
"error while getting daemonset info for chart {}, won't deploy CNI cahrt. {:?}",
"error while getting daemonset info for chart {}, won't deploy CNI chart. {:?}",
&self.chart_info.name, e
);
false


@@ -2,14 +2,14 @@ use std::env;
use std::str::FromStr;
use itertools::Itertools;
use retry::delay::Fibonacci;
use retry::delay::{Fibonacci, Fixed};
use retry::Error::Operation;
use retry::OperationResult;
use rusoto_core::Region;
use serde::{Deserialize, Serialize};
use tera::Context as TeraContext;
use crate::cloud_provider::aws::kubernetes::helm_charts::aws_helm_charts;
use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::node::Node;
use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create;
use crate::cloud_provider::aws::AWS;
@@ -48,7 +48,7 @@ pub mod node;
pub mod roles;
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Options {
pub struct InfraOptions {
// AWS related
pub eks_zone_a_subnet_blocks: Vec<String>,
pub eks_zone_b_subnet_blocks: Vec<String>,
@@ -98,7 +98,7 @@ pub struct EKS<'a> {
s3: S3,
nodes: Vec<Node>,
template_directory: String,
options: Options,
options: InfraOptions,
listeners: Listeners,
}
@@ -111,7 +111,7 @@ impl<'a> EKS<'a> {
region: &str,
cloud_provider: &'a AWS,
dns_provider: &'a dyn DnsProvider,
options: Options,
options: InfraOptions,
nodes: Vec<Node>,
) -> Self {
let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir());
@@ -145,7 +145,26 @@ impl<'a> EKS<'a> {
format!("qovery-kubeconfigs-{}", self.id())
}
fn tera_context(&self) -> TeraContext {
fn managed_dns_resolvers_terraform_format(&self) -> String {
let managed_dns_resolvers: Vec<String> = self
.dns_provider
.resolvers()
.iter()
.map(|x| format!("{}", x.clone().to_string()))
.collect();
terraform_list_format(managed_dns_resolvers)
}
fn lets_encrypt_url(&self) -> String {
match &self.context.is_test_cluster() {
true => "https://acme-staging-v02.api.letsencrypt.org/directory",
false => "https://acme-v02.api.letsencrypt.org/directory",
}
.to_string()
}
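// For illustration, the selection above as a free function: test clusters target the
// Let's Encrypt staging directory because the production endpoint is rate-limited,
// which matters for the throwaway clusters created by the test suite.
fn acme_directory_url(is_test_cluster: bool) -> &'static str {
    if is_test_cluster {
        "https://acme-staging-v02.api.letsencrypt.org/directory" // staging: relaxed rate limits
    } else {
        "https://acme-v02.api.letsencrypt.org/directory" // production: strict rate limits
    }
}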
fn tera_context(&self) -> Result<TeraContext, EngineError> {
let format_ips =
|ips: &Vec<String>| -> Vec<String> { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::<Vec<_>>() };
@@ -196,17 +215,9 @@ impl<'a> EKS<'a> {
let elasticsearch_cidr_subnet = self.options.elasticsearch_cidr_subnet.clone();
let managed_dns_list = vec![self.dns_provider.name()];
let managed_dns_domains_helm_format = vec![format!("\"{}\"", self.dns_provider.domain())];
let managed_dns_domains_helm_format = vec![format!("{}", self.dns_provider.domain())];
let managed_dns_domains_terraform_format = terraform_list_format(vec![self.dns_provider.domain().to_string()]);
let managed_dns_resolvers: Vec<String> = self
.dns_provider
.resolvers()
.iter()
.map(|x| format!("{}", x.clone().to_string()))
.collect();
let managed_dns_resolvers_terraform_format = terraform_list_format(managed_dns_resolvers);
let managed_dns_resolvers_terraform_format = self.managed_dns_resolvers_terraform_format();
let mut context = TeraContext::new();
// Qovery
@@ -257,7 +268,7 @@ impl<'a> EKS<'a> {
match self.dns_provider.kind() {
dns_provider::Kind::Cloudflare => {
context.insert("external_dns_provider", "cloudflare");
context.insert("external_dns_provider", self.dns_provider.provider_name());
context.insert("cloudflare_api_token", self.dns_provider.token());
context.insert("cloudflare_email", self.dns_provider.account());
}
@@ -266,11 +277,7 @@ impl<'a> EKS<'a> {
context.insert("dns_email_report", &self.options.tls_email_report); // Pierre suggested renaming to tls_email_report
// TLS
let lets_encrypt_url = match &self.context.is_test_cluster() {
true => "https://acme-staging-v02.api.letsencrypt.org/directory",
false => "https://acme-v02.api.letsencrypt.org/directory",
};
context.insert("acme_server_url", lets_encrypt_url);
context.insert("acme_server_url", &self.lets_encrypt_url());
// Vault
context.insert("vault_auth_method", "none");
@@ -296,6 +303,7 @@ impl<'a> EKS<'a> {
};
// Other Kubernetes
context.insert("kubernetes_cluster_name", &self.cluster_name());
context.insert("enable_cluster_autoscaler", &true);
// AWS
@@ -388,7 +396,7 @@ impl<'a> EKS<'a> {
context.insert("qovery_ssh_key", self.options.qovery_ssh_key.as_str());
context.insert("discord_api_key", self.options.discord_api_key.as_str());
context
Ok(context)
}
fn upgrade(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
@@ -427,7 +435,7 @@ impl<'a> EKS<'a> {
};
// generate terraform files and copy them into temp dir
let mut context = self.tera_context();
let mut context = self.tera_context()?;
//
// Upgrade master nodes
@@ -735,7 +743,7 @@ impl<'a> Kubernetes for EKS<'a> {
);
// generate terraform files and copy them into temp dir
let context = self.tera_context();
let context = self.tera_context()?;
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
@@ -786,11 +794,37 @@ impl<'a> Kubernetes for EKS<'a> {
.into_iter()
.map(|x| (x.0.to_string(), x.1.to_string()))
.collect();
let charts_prerequisites = ChartsConfigPrerequisites {
organization_id: self.cloud_provider.organization_id().to_string(),
infra_options: self.options.clone(),
cluster_id: self.id.clone(),
region: self.region().to_string(),
cluster_name: self.cluster_name().to_string(),
cloud_provider: "aws".to_string(),
test_cluster: self.context.is_test_cluster(),
aws_access_key_id: self.cloud_provider.access_key_id.to_string(),
aws_secret_access_key: self.cloud_provider.secret_access_key.to_string(),
ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory),
ff_metrics_history_enabled: self.context.is_feature_enabled(&Features::MetricsHistory),
managed_dns_helm_format: self.dns_provider.domain_helm_format(),
managed_dns_resolvers_terraform_format: self.managed_dns_resolvers_terraform_format(),
external_dns_provider: self.dns_provider.provider_name().to_string(),
dns_email_report: self.options.tls_email_report.clone(),
acme_url: self.lets_encrypt_url(),
cloudflare_email: self.dns_provider.account().to_string(),
cloudflare_api_token: self.dns_provider.token().to_string(),
};
let helm_charts_to_deploy = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
aws_helm_charts(format!("{}/qovery-tf-config.json", &temp_dir).as_str(), Some(&temp_dir)),
aws_helm_charts(
format!("{}/qovery-tf-config.json", &temp_dir).as_str(),
&charts_prerequisites,
Some(&temp_dir),
&kubeconfig,
&credentials_environment_variables,
),
)?;
cast_simple_error_to_engine_error(
@@ -876,7 +910,7 @@ impl<'a> Kubernetes for EKS<'a> {
);
// generate terraform files and copy them into temp dir
let mut context = self.tera_context();
let mut context = self.tera_context()?;
// pause: remove all worker nodes to reduce the bill, but keep masters to preserve the deployment config, certificates, etc.
let worker_nodes: Vec<WorkerNodeDataTemplate> = Vec::new();
@@ -938,7 +972,7 @@ impl<'a> Kubernetes for EKS<'a> {
// pause: wait up to 1h for the engine to have 0 running jobs before pausing, to avoid leaving an unreleased lock (from helm or terraform, for example)
let metric_name = "taskmanager_nb_running_tasks";
let wait_engine_job_finish = retry::retry(Fibonacci::from_millis(60000).take(60), || {
let wait_engine_job_finish = retry::retry(Fixed::from_millis(60000).take(60), || {
return match kubectl_exec_api_custom_metrics(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
@@ -1058,7 +1092,7 @@ impl<'a> Kubernetes for EKS<'a> {
);
// generate terraform files and copy them into temp dir
let context = self.tera_context();
let context = self.tera_context()?;
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),

View File

@@ -30,6 +30,12 @@ pub struct ChartSetValue {
pub value: String,
}
#[derive(Clone)]
pub struct ChartValuesGenerated {
pub filename: String,
pub yaml_content: String,
}
#[derive(Clone)]
pub struct ChartInfo {
pub name: String,
@@ -43,6 +49,7 @@ pub struct ChartInfo {
pub wait: bool,
pub values: Vec<ChartSetValue>,
pub values_files: Vec<String>,
pub yaml_files_content: Vec<ChartValuesGenerated>,
}
impl Default for ChartInfo {
@@ -54,11 +61,12 @@ impl Default for ChartInfo {
action: Deploy,
atomic: true,
force_upgrade: false,
timeout: "300s".to_string(),
timeout: "180s".to_string(),
dry_run: false,
wait: true,
values: Vec::new(),
values_files: Vec::new(),
yaml_files_content: vec![],
}
}
}
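// A minimal sketch of the new yaml_files_content field in use: values YAML generated at
// runtime is carried in memory and written next to the chart at deploy time (see
// helm_exec_upgrade_with_chart_info), then passed to helm with -f. Chart name, path and
// content below are made up for the example:
let chart = ChartInfo {
    name: "my-chart".to_string(),
    path: "common/charts/my-chart".to_string(),
    yaml_files_content: vec![ChartValuesGenerated {
        filename: "generated-values.yaml".to_string(),
        yaml_content: "replicaCount: 2\n".to_string(),
    }],
    ..Default::default() // timeout "180s", atomic, wait, etc.
};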
@@ -115,7 +123,7 @@ pub trait HelmChart: Send {
"Error while deploying chart: {:?}",
e.message.clone().expect("no message provided")
);
self.on_deploy_failure(&kubernetes_config, &envs);
self.on_deploy_failure(&kubernetes_config, &envs)?;
return Err(e);
}
};
@@ -227,48 +235,55 @@ impl HelmChart for CoreDNSConfigChart {
environment_variables.push(("KUBECONFIG", kubernetes_config.to_str().unwrap()));
info!("setting annotations and labels on {}/{}", &kind, &self.chart_info.name);
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
"meta.helm.sh/release-namespace=kube-system",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
&kind,
&self.chart_info.name,
"app.kubernetes.io/managed-by=Helm",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
let steps = || -> Result<(), SimpleError> {
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
"meta.helm.sh/release-namespace=kube-system",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
&kind,
&self.chart_info.name,
"app.kubernetes.io/managed-by=Helm",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
Ok(())
};
if let Err(e) = steps() {
return Err(e);
};
Ok(())
}


@@ -35,6 +35,9 @@ pub trait Kubernetes: Listen {
fn name_with_id(&self) -> String {
format!("{} ({})", self.name(), self.id())
}
fn cluster_name(&self) -> String {
format!("qovery-{}", self.id())
}
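// Illustration: cluster_name() simply prefixes the cluster id; this is the same value
// the Terraform templates now receive via the "kubernetes_cluster_name" context key
// (the id below is hypothetical):
assert_eq!(format!("qovery-{}", "ab1cd2ef3"), "qovery-ab1cd2ef3");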
fn version(&self) -> &str;
fn region(&self) -> &str;
fn cloud_provider(&self) -> &dyn CloudProvider;


@@ -15,6 +15,7 @@ pub mod helm;
pub mod kubernetes;
pub mod metrics;
pub mod models;
pub mod qovery;
pub mod service;
pub mod utilities;


@@ -0,0 +1,56 @@
use reqwest::{header, Error};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct QoveryAgent {
pub kubernetes_id: String,
pub version: String,
pub object_type: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct QoveryEngine {
pub kubernetes_id: String,
pub version: String,
pub object_type: String,
}
pub enum QoveryApp {
QoveryAgent,
QoveryEngine,
}
pub enum QoveryAppName {
Agent,
Engine,
}
pub fn get_qovery_app_version<T: DeserializeOwned>(
qovery_app_type: QoveryAppName,
token: &str,
api_fqdn: &str,
cluster_id: &str,
) -> Result<T, Error> {
let mut headers = header::HeaderMap::new();
headers.insert("Content-Type", "application/json".parse().unwrap());
headers.insert("X-Qovery-Signature", token.parse().unwrap());
let app_type = match qovery_app_type {
QoveryAppName::Agent => "agent",
QoveryAppName::Engine => "engine",
};
let url = format!(
"https://{}/api/v1/{}-version?type=cluster&clusterId={}",
api_fqdn, app_type, cluster_id
);
match reqwest::blocking::Client::new().get(&url).headers(headers).send() {
Ok(x) => match x.json::<T>() {
Ok(qa) => Ok(qa),
Err(e) => Err(e),
},
Err(e) => Err(e),
}
}
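// Usage sketch: the function is generic over the deserialized type, so the caller picks
// the concrete struct; that is how the call sites obtain QoveryAgent and QoveryEngine.
// Token, FQDN and cluster id below are placeholders:
let agent: QoveryAgent = get_qovery_app_version(
    QoveryAppName::Agent,
    "agent-version-controller-token", // placeholder token
    "api.example.com",                // placeholder API FQDN
    "my-cluster-id",                  // placeholder cluster id
)?;
info!("qovery-agent version to deploy: {}", agent.version);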


@@ -1,4 +1,4 @@
use std::io::Error;
use std::io::{Error, Write};
use std::path::Path;
use tracing::{error, info, span, Level};
@@ -8,6 +8,10 @@ use crate::cmd::structs::{Helm, HelmChart, HelmHistoryRow};
use crate::cmd::utilities::exec_with_envs_and_output;
use crate::error::{SimpleError, SimpleErrorKind};
use chrono::Duration;
use retry::delay::Fixed;
use retry::Error::Operation;
use retry::OperationResult;
use std::fs::File;
const HELM_DEFAULT_TIMEOUT_IN_SECONDS: u32 = 300;
@@ -66,10 +70,10 @@ pub fn helm_exec_upgrade_with_chart_info<P>(
where
P: AsRef<Path>,
{
let debug = false;
let mut args_string: Vec<String> = vec![
"upgrade",
"-o",
"json",
"--kubeconfig",
kubernetes_config.as_ref().to_str().unwrap(),
"--create-namespace",
@@ -83,6 +87,10 @@ where
.map(|x| x.to_string())
.collect();
if debug {
args_string.push("-o".to_string());
args_string.push("json".to_string());
}
// warning: don't add debug output here, or the json output won't work
if chart.atomic {
args_string.push("--atomic".to_string())
@@ -106,64 +114,96 @@ where
args_string.push("-f".to_string());
args_string.push(value_file.clone());
}
for value_file in &chart.yaml_files_content {
let file_path = format!("{}/{}", chart.path, &value_file.filename);
let file_create = || -> Result<(), Error> {
let mut file = File::create(&file_path)?;
file.write_all(value_file.yaml_content.as_bytes())?;
Ok(())
};
// no need to validate yaml as it will be done by helm
if let Err(e) = file_create() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while writing yaml content to file {}\n{}\n{}",
&file_path, value_file.yaml_content, e
)),
});
};
args_string.push("-f".to_string());
args_string.push(file_path.clone());
}
// add last elements
args_string.push(chart.name.to_string());
args_string.push(chart.path.to_string());
let args = args_string.iter().map(|x| x.as_str()).collect();
let mut json_output_string = String::new();
let mut error_message = String::new();
let mut helm_error_during_deployment = SimpleError {
kind: SimpleErrorKind::Other,
message: None,
};
match helm_exec_with_output(
args,
envs.clone(),
|out| match out {
Ok(line) => json_output_string = line,
Err(err) => error!("{}", &err),
},
|out| match out {
Ok(line) => {
// helm errors are not json formatted unfortunately
if line.contains("has been rolled back") {
error_message = format!("deployment {} has been rolled back", chart.name);
let result = retry::retry(Fixed::from_millis(15000).take(3), || {
let args = args_string.iter().map(|x| x.as_str()).collect();
let mut helm_error_during_deployment = SimpleError {
kind: SimpleErrorKind::Other,
message: None,
};
match helm_exec_with_output(
args,
envs.clone(),
|out| match out {
Ok(line) => {
if debug {
debug!("{}", line);
}
json_output_string = line
}
Err(err) => error!("{}", &err),
},
|out| match out {
Ok(line) => {
// helm errors are not json formatted unfortunately
if line.contains("has been rolled back") {
error_message = format!("deployment {} has been rolled back", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
} else if line.contains("has been uninstalled") {
error_message = format!("deployment {} has been uninstalled due to failure", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
// special fix for prometheus operator
} else if line.contains("info: skipping unknown hook: \"crd-install\"") {
debug!("chart {}: {}", chart.name, line);
} else {
error_message = format!("deployment {} has failed", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
error!("{}. {}", &error_message, &line);
}
}
Err(err) => {
error_message = format!("helm chart {} failed before deployment. {:?}", chart.name, err);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
} else if line.contains("has been uninstalled") {
error_message = format!("deployment {} has been uninstalled due to failure", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
error!("{}", error_message);
}
},
) {
Ok(_) => {
if helm_error_during_deployment.message.is_some() {
OperationResult::Retry(helm_error_during_deployment)
} else {
error_message = format!("deployment {} has failed", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
error!("{}. {}", &error_message, &line);
OperationResult::Ok(())
}
}
Err(err) => {
error_message = format!("helm chart {} failed before deployment. {:?}", chart.name, err);
helm_error_during_deployment.message = Some(error_message.clone());
error!("{}", error_message);
}
},
) {
Ok(_) => {
if helm_error_during_deployment.message.is_some() {
return Err(helm_error_during_deployment);
}
Err(e) => OperationResult::Retry(e),
}
Err(e) => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("{}. {:?}", error_message, e.message)),
})
}
}
});
Ok(())
match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(e)) => return Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
}
}
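// Shape of the retry wrapper above, reduced to its essentials: one initial attempt plus
// up to 3 retries spaced 15s apart (Fixed::from_millis(15000).take(3)); only the last
// error is surfaced. `try_helm_upgrade` is a hypothetical stand-in for the
// helm_exec_with_output call:
let result = retry::retry(Fixed::from_millis(15_000).take(3), || {
    match try_helm_upgrade() {
        Ok(_) => OperationResult::Ok(()),
        Err(e) => OperationResult::Retry(e), // transient failure: try again
    }
});
match result {
    Ok(_) => Ok(()),
    Err(Operation { error, .. }) => Err(error), // retries exhausted: surface last error
    Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
}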
pub fn helm_exec_upgrade<P>(


@@ -15,7 +15,6 @@ use crate::cmd::structs::{
use crate::cmd::utilities::exec_with_envs_and_output;
use crate::constants::KUBECONFIG;
use crate::error::{SimpleError, SimpleErrorKind};
use itertools::Itertools;
pub enum ScalingKind {
Deployment,


@@ -42,12 +42,32 @@ pub struct Metadata {
pub uid: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Daemonset {
pub api_version: String,
pub items: Vec<Item>,
pub items: Option<Vec<Item>>,
pub kind: String,
pub spec: Option<Spec>,
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Spec {
pub selector: Selector,
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Selector {
pub match_labels: MatchLabels,
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MatchLabels {
#[serde(rename = "k8s-app")]
pub k8s_app: Option<String>,
}
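// These structs mirror only the slice of `kubectl get daemonset -o json` output that the
// CNI check needs; `items` and `k8s_app` are Option because each is legitimately absent
// depending on whether a list or a single object was requested. Round-trip sketch with a
// trimmed, hypothetical payload:
let raw = r#"{
    "apiVersion": "apps/v1",
    "kind": "DaemonSet",
    "spec": { "selector": { "matchLabels": { "k8s-app": "aws-node" } } }
}"#;
let ds: Daemonset = serde_json::from_str(raw).unwrap();
// `items` is None here (single object, not a list); the selector label is present:
assert_eq!(
    ds.spec.unwrap().selector.match_labels.k8s_app.as_deref(),
    Some("aws-node") // the marker is_cni_old_installed_version looks for
);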
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]


@@ -38,6 +38,10 @@ impl DnsProvider for Cloudflare {
&self.context
}
fn provider_name(&self) -> &str {
"cloudflare"
}
fn kind(&self) -> Kind {
Kind::Cloudflare
}


@@ -9,6 +9,7 @@ pub mod cloudflare;
pub trait DnsProvider {
fn context(&self) -> &Context;
fn provider_name(&self) -> &str;
fn kind(&self) -> Kind;
fn id(&self) -> &str;
fn name(&self) -> &str;
@@ -18,6 +19,9 @@ pub trait DnsProvider {
fn account(&self) -> &str;
fn token(&self) -> &str;
fn domain(&self) -> &str;
fn domain_helm_format(&self) -> String {
format!("{{{}}}", self.domain())
}
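// The extra braces exist because helm's --set syntax writes lists as {a,b}, and the
// managed domains are consumed as a list-typed value. Illustration for a provider whose
// domain() is "example.com":
assert_eq!(format!("{{{}}}", "example.com"), "{example.com}");
// i.e. domain_helm_format() => "{example.com}", a single-element helm list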
fn resolvers(&self) -> Vec<Ipv4Addr>;
fn is_valid(&self) -> Result<(), EngineError>;
fn engine_error_scope(&self) -> EngineErrorScope {


@@ -33,18 +33,6 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "arrayref"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
[[package]]
name = "arrayvec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "async-trait"
version = "0.1.42"
@@ -115,17 +103,6 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "blake2b_simd"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587"
dependencies = [
"arrayref",
"arrayvec",
"constant_time_eq",
]
[[package]]
name = "block-buffer"
version = "0.7.3"
@@ -262,9 +239,28 @@ dependencies = [
[[package]]
name = "cmd_lib"
version = "0.7.8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "475bd7aa7680b4ed8f6bb59745e882bcbaeb39326532bb79ffb1716480d9a274"
checksum = "c7114bcaf989b09f9fb057b7d7c81491bc1558e8a67bd49bf7f682318d8a6501"
dependencies = [
"cmd_lib_macros",
"faccess",
"lazy_static",
"log",
"os_pipe",
]
[[package]]
name = "cmd_lib_macros"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "356fd654e9a433e730a826d4e01ea2414ab122ca8a5374eadf6c43a6090f8dd3"
dependencies = [
"proc-macro-error",
"proc-macro2 1.0.24",
"quote 1.0.8",
"syn 1.0.56",
]
[[package]]
name = "const_fn"
@@ -272,12 +268,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab"
[[package]]
name = "constant_time_eq"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
[[package]]
name = "cookie"
version = "0.12.0"
@@ -515,9 +505,9 @@ dependencies = [
[[package]]
name = "dirs"
version = "3.0.1"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "142995ed02755914747cc6ca76fc7e4583cd18578746716d0508ea6ed558b9ff"
checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309"
dependencies = [
"dirs-sys",
]
@@ -534,12 +524,12 @@ dependencies = [
[[package]]
name = "dirs-sys"
version = "0.3.5"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a"
checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780"
dependencies = [
"libc",
"redox_users 0.3.5",
"redox_users",
"winapi 0.3.9",
]
@@ -550,7 +540,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
dependencies = [
"libc",
"redox_users 0.4.0",
"redox_users",
"winapi 0.3.9",
]
@@ -608,6 +598,17 @@ dependencies = [
"version_check",
]
[[package]]
name = "faccess"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e039175679baf763ddddf4f76900b92d4dae9411ee88cf42d2f11b976b09e07c"
dependencies = [
"bitflags",
"libc",
"winapi 0.3.9",
]
[[package]]
name = "failure"
version = "0.1.8"
@@ -650,9 +651,9 @@ dependencies = [
[[package]]
name = "flate2"
version = "1.0.19"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0"
dependencies = [
"cfg-if 1.0.0",
"crc32fast",
@@ -913,9 +914,9 @@ checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
[[package]]
name = "git2"
version = "0.13.12"
version = "0.13.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca6f1a0238d7f8f8fd5ee642f4ebac4dbc03e03d1f78fbe7a3ede35dcf7e2224"
checksum = "d9831e983241f8c5591ed53f17d874833e2fa82cac2625f3888c50cbfe136cba"
dependencies = [
"bitflags",
"libc",
@@ -1339,9 +1340,9 @@ checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135"
[[package]]
name = "itertools"
version = "0.9.0"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf"
dependencies = [
"either",
]
@@ -1394,9 +1395,9 @@ checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
[[package]]
name = "libgit2-sys"
version = "0.12.14+1.1.0"
version = "0.12.21+1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f25af58e6495f7caf2919d08f212de550cfa3ed2f5e744988938ea292b9f549"
checksum = "86271bacd72b2b9e854c3dcfb82efd538f15f870e4c11af66900effb462f6825"
dependencies = [
"cc",
"libc",
@@ -1767,6 +1768,16 @@ dependencies = [
"vcpkg",
]
[[package]]
name = "os_pipe"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb233f06c2307e1f5ce2ecad9f8121cffbbee2c95428f44ea85222e460d0d213"
dependencies = [
"libc",
"winapi 0.3.9",
]
[[package]]
name = "parking_lot"
version = "0.9.0"
@@ -1952,6 +1963,30 @@ version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2 1.0.24",
"quote 1.0.8",
"syn 1.0.56",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2 1.0.24",
"quote 1.0.8",
"version_check",
]
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
@@ -1999,7 +2034,7 @@ dependencies = [
name = "qovery-engine"
version = "0.1.0"
dependencies = [
"base64 0.12.3",
"base64 0.13.0",
"chrono",
"cmd_lib",
"digitalocean",
@@ -2319,17 +2354,6 @@ dependencies = [
"bitflags",
]
[[package]]
name = "redox_users"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d"
dependencies = [
"getrandom 0.1.16",
"redox_syscall 0.1.57",
"rust-argon2",
]
[[package]]
name = "redox_users"
version = "0.4.0"
@@ -2618,18 +2642,6 @@ dependencies = [
"xml-rs",
]
[[package]]
name = "rust-argon2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb"
dependencies = [
"base64 0.13.0",
"blake2b_simd",
"constant_time_eq",
"crossbeam-utils 0.8.4",
]
[[package]]
name = "rust-crypto"
version = "0.2.36"
@@ -3026,13 +3038,12 @@ dependencies = [
[[package]]
name = "tar"
version = "0.4.30"
version = "0.4.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "489997b7557e9a43e192c527face4feacc78bfbe6eed67fd55c4c9e381cba290"
checksum = "7d779dc6aeff029314570f666ec83f19df7280bb36ef338442cfa8c604021b80"
dependencies = [
"filetime",
"libc",
"redox_syscall 0.1.57",
"xattr",
]
@@ -3727,9 +3738,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "walkdir"
version = "2.3.1"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
dependencies = [
"same-file",
"winapi 0.3.9",


@@ -1,10 +1,11 @@
extern crate serde;
extern crate serde_derive;
use tracing::error;
use chrono::Utc;
use qovery_engine::cloud_provider::aws::kubernetes::node::Node;
use qovery_engine::cloud_provider::aws::kubernetes::{Options, EKS};
use qovery_engine::cloud_provider::aws::kubernetes::{InfraOptions, EKS};
use qovery_engine::cloud_provider::aws::AWS;
use qovery_engine::cloud_provider::utilities::sanitize_name;
use qovery_engine::cloud_provider::TerraformStateCredentials;
@@ -35,6 +36,14 @@ pub fn execution_id() -> String {
pub fn container_registry_ecr(context: &Context) -> ECR {
let secrets = FuncTestsSecrets::new();
if secrets.AWS_ACCESS_KEY_ID.is_none()
|| secrets.AWS_SECRET_ACCESS_KEY.is_none()
|| secrets.AWS_DEFAULT_REGION.is_none()
{
error!("Please check your Vault connectivity (token/address) or AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY/AWS_DEFAULT_REGION envrionment variables are set");
std::process::exit(1)
}
ECR::new(
context.clone(),
"default-ecr-registry-Qovery Test",
@@ -87,8 +96,8 @@ pub fn cloud_provider_aws(context: &Context) -> AWS {
)
}
pub fn eks_options(secrets: FuncTestsSecrets) -> Options {
Options {
pub fn eks_options(secrets: FuncTestsSecrets) -> InfraOptions {
InfraOptions {
eks_zone_a_subnet_blocks: vec![
"10.0.0.0/23".to_string(),
"10.0.2.0/23".to_string(),


@@ -183,7 +183,13 @@ impl FuncTestsSecrets {
}
};
let client = hashicorp_vault::Client::new(vault_config.address, vault_config.token).unwrap();
let client = match hashicorp_vault::Client::new(vault_config.address, vault_config.token) {
Ok(x) => x,
Err(e) => {
println!("error: wasn't able to contact Vault server. {:?}", e);
return empty_secrets;
}
};
let res: Result<FuncTestsSecrets, _> = client.get_custom_secret(secret_name);
match res {
Ok(x) => x,


@@ -27,7 +27,6 @@ use tracing::{span, Level};
// (context_vec, env_vec)
// }
// TODO:
// - Tests that applications are always restarted when receiving a CREATE action
// see: https://github.com/Qovery/engine/pull/269


@@ -111,8 +111,6 @@ fn create_upgrade_and_destroy_eks_cluster(
};
// Destroy
// There is a bug with the current version of Terraform (0.14.10) where the destroy fails, but it works
// It doesn't find any helm charts after destroying the workers and charts have already been destroyed
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
@@ -188,8 +186,6 @@ fn create_and_destroy_eks_cluster(region: &str, secrets: FuncTestsSecrets, test_
}
// Destroy
// There is a bug with the current version of Terraform (0.14.10) where the destroy fails, but it works
// It doesn't find any helm charts after destroying the workers and charts have already been destroyed
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
@@ -216,7 +212,7 @@ fn create_and_destroy_eks_cluster_in_eu_west_3() {
create_and_destroy_eks_cluster(
&region,
secrets,
true,
false,
&format!("create_and_destroy_eks_cluster_in_{}", region.replace("-", "_")),
);
}
@@ -229,7 +225,7 @@ fn create_and_destroy_eks_cluster_in_us_east_2() {
create_and_destroy_eks_cluster(
&region,
secrets,
false,
true,
&format!("create_and_destroy_eks_cluster_in_{}", region.replace("-", "_")),
);
}