diff --git a/lib/aws-ec2/bootstrap/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf index eadb84ea..e2fa79f9 100644 --- a/lib/aws-ec2/bootstrap/ec2-sec-group.tf +++ b/lib/aws-ec2/bootstrap/ec2-sec-group.tf @@ -1,7 +1,8 @@ # randomize inbound kubernetes port number for more security resource "random_integer" "kubernetes_external_port" { min = 1024 - max = 65534 + # not more to avoid k3s and Kubernetes port overlap + max = 9999 } resource "aws_security_group" "ec2_instance" { diff --git a/lib/aws-ec2/bootstrap/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf index 8f5aac4c..4356f1cc 100644 --- a/lib/aws-ec2/bootstrap/ec2.j2.tf +++ b/lib/aws-ec2/bootstrap/ec2.j2.tf @@ -21,6 +21,21 @@ resource "aws_key_pair" "qovery_ssh_key" { public_key = "{{ qovery_ssh_key }}" } +resource "aws_ebs_volume" "ebs_disk1" { + availability_zone = aws_subnet.ec2_zone_a[0].availability_zone + size = var.ec2_instance.disk_size_in_gb + type = "gp2" + encrypted = true + tags = local.tags_common +} + +resource "aws_volume_attachment" "ebs_disk1" { + device_name = "/dev/sdq" + volume_id = aws_ebs_volume.ebs_disk1.id + instance_id = aws_instance.ec2_instance.id + force_detach = true +} + resource "aws_instance" "ec2_instance" { ami = data.aws_ami.debian.id instance_type = var.ec2_instance.instance_type @@ -44,11 +59,7 @@ resource "aws_instance" "ec2_instance" { # k3s install user_data = local.bootstrap - user_data_replace_on_change = false - lifecycle { - // avoid user data changes, forces to restart the EC2 instance - ignore_changes = [user_data] - } + user_data_replace_on_change = true tags = merge( local.tags_common, @@ -58,7 +69,8 @@ resource "aws_instance" "ec2_instance" { ) depends_on = [ - aws_s3_bucket.kubeconfigs_bucket + aws_s3_bucket.kubeconfigs_bucket, + aws_ebs_volume.ebs_disk1 ] } @@ -68,16 +80,53 @@ locals { bootstrap = < >(tee ${var.ec2_instance.user_data_logs_path}|logger -t user-data -s 2>/dev/console) 2>&1 + export KUBECONFIG_FILENAME="${var.kubernetes_cluster_id}.yaml" 
export NEW_KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME" +print_title "Install packages" apt-get update -apt-get -y install curl s3cmd +apt-get -y install curl s3cmd parted +print_title "Prepare Rancher dedicated data disk" +disk_device="$(lsblk -r | grep disk | grep ${var.ec2_instance.disk_size_in_gb}G | tail -1 | awk '{ print $1 }')" +echo "disk_device: $disk_device" +disk_device_path="/dev/$disk_device" +echo "disk_device_path: $disk_device_path" +if [ $(lsblk -r | grep $disk_device | grep part | grep -c $disk_device) -eq 0 ] ; then + echo "No partition found, erasing disk" + parted -s -a optimal $disk_device_path mklabel gpt + parted -s -a optimal $disk_device_path mkpart primary ext4 0% 100% + partprobe $disk_device_path + sleep 5 + export partition_path="/dev/$(lsblk -r | grep $disk_device | awk '/part/{print $1}')" + echo "partition_path: $partition_path" + mkfs.ext4 $partition_path + sleep 2 +else + echo "Partition already exists, not erasing" +fi +echo "$partition_path /var/lib/rancher ext4 rw,discard 0 0" >> /etc/fstab +mkdir -p /var/lib/rancher +mount /var/lib/rancher +sleep 2 +if [ $(df | grep -c '/var/lib/rancher') -eq 0 ] ; then + echo "No data disk was able to be mounted, can't continue" + exit 1 +fi + +print_title "Install k3s" export INSTALL_K3S_VERSION=${var.k3s_config.version} export INSTALL_K3S_CHANNEL=${var.k3s_config.channel} export INSTALL_K3S_EXEC="--https-listen-port=${random_integer.kubernetes_external_port.result} ${var.k3s_config.exec}" -curl -sfL https://get.k3s.io | sh - +echo "k3s args: $INSTALL_K3S_EXEC" +curl -fL https://get.k3s.io | sh - echo 'export KUBECONFIG=/etc/rancher/k3s/k3s.yaml' >> /etc/profile while [ ! -f /etc/rancher/k3s/k3s.yaml ] ; do @@ -85,6 +134,7 @@ while [ !
-f /etc/rancher/k3s/k3s.yaml ] ; do sleep 1 done +print_title "Push Kubeconfig to S3" # Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap public_hostname="$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)" sed "s/127.0.0.1/$public_hostname/g" /etc/rancher/k3s/k3s.yaml > $NEW_KUBECONFIG_PATH diff --git a/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf b/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf index b587bed6..8bc3b075 100644 --- a/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf +++ b/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf @@ -72,6 +72,7 @@ variable "ec2_instance" { default = { "instance_type" = "{{ eks_worker_nodes[0].instance_type }}" "disk_size_in_gb" = "{{ eks_worker_nodes[0].disk_size_in_gib }}" + "user_data_logs_path" = "/var/log/user-data.log" # install error logs location } type = map(string) } diff --git a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs index 481c99b3..878b233e 100644 --- a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs @@ -279,7 +279,7 @@ pub fn ec2_aws_helm_charts( }, ChartSetValue { key: "resources.limits.memory".to_string(), - value: "50Mi".to_string(), + value: "100Mi".to_string(), }, ]; let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, Some(cluster_agent_resources))?; @@ -300,7 +300,7 @@ pub fn ec2_aws_helm_charts( }, ChartSetValue { key: "resources.limits.memory".to_string(), - value: "50Mi".to_string(), + value: "100Mi".to_string(), }, ]; let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, Some(shell_agent_resources))?; diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index acabbc78..8a63f34f 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -957,7 +957,7 @@ pub fn get_chart_for_cert_manager_config( value: x.cloudflare_email.clone(), }) } - 
DnsProviderConfiguration::QoveryDns(x) => {} + DnsProviderConfiguration::QoveryDns(_) => {} }; cert_manager_config diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index b1b58b60..699c1b41 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -1266,6 +1266,13 @@ impl NodeGroups { disk_size_in_gib, }) } + + pub fn to_ec2_instance(&self) -> InstanceEc2 { + InstanceEc2 { + instance_type: self.instance_type.clone(), + disk_size_in_gib: self.disk_size_in_gib, + } + } } impl InstanceEc2 { diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 8baa3a54..d86f802f 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -10,7 +10,6 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::ecr::ECR; -use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; use qovery_engine::io_models::{Context, NoOpProgressListener}; use qovery_engine::logger::Logger; @@ -18,8 +17,8 @@ use std::str::FromStr; use std::sync::Arc; use tracing::error; -use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::dns::{dns_provider_cloudflare, dns_provider_qoverydns}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3; @@ -88,7 +87,10 @@ impl Cluster for AWS { // use AWS let cloud_provider: Arc> = Arc::new(AWS::cloud_provider(context)); - let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); + let dns_provider = match kubernetes_kind.clone() { + KubernetesKind::Ec2 => Arc::new(dns_provider_qoverydns(context, cluster_domain)), + _ => Arc::new(dns_provider_cloudflare(context, cluster_domain)), + }; 
let kubernetes = get_environment_test_kubernetes( context, diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs deleted file mode 100644 index d00e9de2..00000000 --- a/test_utilities/src/cloudflare.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::common::ClusterDomain; -use crate::utilities::FuncTestsSecrets; -use qovery_engine::dns_provider::cloudflare::Cloudflare; -use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::io_models::{Context, Domain}; - -pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box { - let secrets = FuncTestsSecrets::new(); - let domain = Domain::new(match domain { - ClusterDomain::Custom(domain) => domain.to_string(), - ClusterDomain::Default { cluster_id } => format!( - "{}.{}", - cluster_id, - secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set") - ), - }); - Box::new(Cloudflare::new( - context.clone(), - "qoverytestdnsclo", - "Qovery Test Cloudflare", - domain, - secrets.CLOUDFLARE_TOKEN.expect("CLOUDFLARE_TOKEN is not set").as_str(), // Cloudflare name: Qovery test - secrets.CLOUDFLARE_ID.expect("CLOUDFLARE_ID is not set").as_str(), - )) -} diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 139bd489..23f1289d 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -11,8 +11,8 @@ use qovery_engine::engine::EngineConfig; use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; -use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::dns::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::dns_provider::DnsProvider; diff --git a/test_utilities/src/dns.rs b/test_utilities/src/dns.rs new file mode 100644 index 00000000..ca2797b5 --- /dev/null +++ 
b/test_utilities/src/dns.rs @@ -0,0 +1,56 @@ +use crate::common::ClusterDomain; +use crate::utilities::FuncTestsSecrets; +use qovery_engine::dns_provider::cloudflare::Cloudflare; +use qovery_engine::dns_provider::qoverydns::QoveryDns; +use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::io_models::{Context, Domain}; + +pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box { + let secrets = FuncTestsSecrets::new(); + let domain = Domain::new(match domain { + ClusterDomain::Custom(domain) => domain.to_string(), + ClusterDomain::Default { cluster_id } => format!( + "{}.{}", + cluster_id, + secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set") + ), + }); + Box::new(Cloudflare::new( + context.clone(), + "qoverytestdnsclo", + "Qovery Test Cloudflare", + domain, + secrets.CLOUDFLARE_TOKEN.expect("CLOUDFLARE_TOKEN is not set").as_str(), // Cloudflare name: Qovery test + secrets.CLOUDFLARE_ID.expect("CLOUDFLARE_ID is not set").as_str(), + )) +} + +pub fn dns_provider_qoverydns(context: &Context, domain: &ClusterDomain) -> Box { + let secrets = FuncTestsSecrets::new(); + let domain = Domain::new(match domain { + ClusterDomain::Custom(domain) => domain.to_string(), + ClusterDomain::Default { cluster_id } => format!( + "{}.{}", + cluster_id, + secrets.QOVERY_DNS_DOMAIN.expect("QOVERY_DNS_DOMAIN is not set") + ), + }); + Box::new(QoveryDns::new( + context.clone(), + "qoverytestdnsqdns", + secrets + .QOVERY_DNS_API_URL + .expect("QOVERY_DNS_API_URL is not set") + .as_str(), + secrets + .QOVERY_DNS_API_PORT + .expect("QOVERY_DNS_API_PORT is not set") + .as_str(), + secrets + .QOVERY_DNS_API_KEY + .expect("QOVERY_DNS_API_KEY is not set") + .as_str(), + "Qovery Test QoveryDNS", + domain, + )) +} diff --git a/test_utilities/src/lib.rs index beca159f..86df313c 100644 --- a/test_utilities/src/lib.rs +++ b/test_utilities/src/lib.rs @@ -5,9 +5,9 @@ extern crate maplit; pub mod aws; pub mod aws_ec2; -pub mod 
cloudflare; pub mod common; pub mod digitalocean; +pub mod dns; pub mod edge_aws_rs; pub mod scaleway; pub mod utilities; diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 2ecf1c2c..dfdf851a 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -21,8 +21,8 @@ use qovery_engine::logger::Logger; use qovery_engine::models::scaleway::ScwZone; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; -use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::dns::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index c8d01869..0a721d19 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -149,6 +149,10 @@ pub struct FuncTestsSecrets { pub QOVERY_GRPC_URL: Option, pub QOVERY_CLUSTER_SECRET_TOKEN: Option, pub QOVERY_CLUSTER_JWT_TOKEN: Option, + pub QOVERY_DNS_API_URL: Option, + pub QOVERY_DNS_API_PORT: Option, + pub QOVERY_DNS_API_KEY: Option, + pub QOVERY_DNS_DOMAIN: Option, } struct VaultConfig { @@ -240,6 +244,10 @@ impl FuncTestsSecrets { QOVERY_GRPC_URL: None, QOVERY_CLUSTER_SECRET_TOKEN: None, QOVERY_CLUSTER_JWT_TOKEN: None, + QOVERY_DNS_API_URL: None, + QOVERY_DNS_API_PORT: None, + QOVERY_DNS_API_KEY: None, + QOVERY_DNS_DOMAIN: None, }; let vault_config = match Self::get_vault_config() { @@ -361,6 +369,10 @@ impl FuncTestsSecrets { secrets.QOVERY_CLUSTER_SECRET_TOKEN, ), QOVERY_CLUSTER_JWT_TOKEN: Self::select_secret("QOVERY_CLUSTER_JWT_TOKEN", secrets.QOVERY_CLUSTER_JWT_TOKEN), + QOVERY_DNS_API_URL: Self::select_secret("QOVERY_DNS_API_URL", secrets.QOVERY_DNS_API_URL), + QOVERY_DNS_API_PORT: Self::select_secret("QOVERY_DNS_API_PORT", secrets.QOVERY_DNS_API_PORT), + 
QOVERY_DNS_API_KEY: Self::select_secret("QOVERY_DNS_API_KEY", secrets.QOVERY_DNS_API_KEY), + QOVERY_DNS_DOMAIN: Self::select_secret("QOVERY_DNS_DOMAIN", secrets.QOVERY_DNS_DOMAIN), } } } diff --git a/tests/aws/aws_kubernetes_ec2.rs index 5efc9fbc..848d4269 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -1,6 +1,6 @@ extern crate test_utilities; -use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +use self::test_utilities::utilities::{context, engine_run_test, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::kubernetes::Kind as KKind; @@ -24,7 +24,7 @@ fn create_and_destroy_aws_ec2_k3s_cluster( engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); let zones = region.get_zones(); - let cluster_id = generate_cluster_id(region.to_string().as_str()); + let cluster_id = "ztest-abe3e22b0".to_string(); // don't change it to test qovery dns provider properly cluster_test( test_name, Kind::Aws, diff --git a/tests/helm/cert_manager.rs index 11a8f048..4d750a74 100644 --- a/tests/helm/cert_manager.rs +++ b/tests/helm/cert_manager.rs @@ -1,4 +1,4 @@ -use qovery_engine::cloud_provider::helm::{ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces}; +use qovery_engine::cloud_provider::helm::{ChartInfo, ChartSetValue, CommonChart, HelmChartNamespaces}; use qovery_engine::cmd::helm::Helm; use serde_derive::Deserialize; @@ -81,6 +81,7 @@ pub struct Metadata2 { pub self_link: String, } +#[allow(dead_code)] // TODO(pmavro): fix this by using the correct tag fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) { let vault_secrets = FuncTestsSecrets::new(); let mut kube_config = dirs::home_dir().unwrap();