feat: add qovery dns tests support

This commit is contained in:
Pierre Mavro
2022-05-10 18:03:40 +02:00
parent 600b64f3f9
commit fc92707d26
15 changed files with 151 additions and 46 deletions

View File

@@ -1,7 +1,8 @@
# randomize inbound kubernetes port number for more security
resource "random_integer" "kubernetes_external_port" {
min = 1024
max = 65534
# not more to avoid k3s and Kubernetes port overlap
max = 9999
}
resource "aws_security_group" "ec2_instance" {

View File

@@ -21,6 +21,21 @@ resource "aws_key_pair" "qovery_ssh_key" {
public_key = "{{ qovery_ssh_key }}"
}
# Dedicated data disk for the Rancher/k3s state directory (/var/lib/rancher);
# it is partitioned, formatted and mounted by the instance bootstrap script.
resource "aws_ebs_volume" "ebs_disk1" {
  # must be created in the same AZ as the EC2 instance or attachment fails
  availability_zone = aws_subnet.ec2_zone_a[0].availability_zone
  size = var.ec2_instance.disk_size_in_gb
  type = "gp2"
  encrypted = true
  tags = local.tags_common
}
# Attaches the Rancher data disk to the EC2 instance.
resource "aws_volume_attachment" "ebs_disk1" {
  # NOTE(review): actual device name seen by the OS may differ (nvme*);
  # the bootstrap script locates the disk by size, not by this name.
  device_name = "/dev/sdq"
  volume_id = aws_ebs_volume.ebs_disk1.id
  instance_id = aws_instance.ec2_instance.id
  # let Terraform detach the volume even while it is mounted in the guest
  force_detach = true
}
resource "aws_instance" "ec2_instance" {
ami = data.aws_ami.debian.id
instance_type = var.ec2_instance.instance_type
@@ -44,11 +59,7 @@ resource "aws_instance" "ec2_instance" {
# k3s install
user_data = local.bootstrap
user_data_replace_on_change = false
lifecycle {
// avoid user data changes, forces to restart the EC2 instance
ignore_changes = [user_data]
}
user_data_replace_on_change = true
tags = merge(
local.tags_common,
@@ -58,7 +69,8 @@ resource "aws_instance" "ec2_instance" {
)
depends_on = [
aws_s3_bucket.kubeconfigs_bucket
aws_s3_bucket.kubeconfigs_bucket,
aws_ebs_volume.ebs_disk1
]
}
@@ -68,16 +80,53 @@ locals {
bootstrap = <<BOOTSTRAP
#!/bin/bash
function print_title() {
echo -e "\n######### $1 #########\n"
}
# enable logs to file and console
exec > >(tee ${var.ec2_instance.user_data_logs_path}|logger -t user-data -s 2>/dev/console) 2>&1
export KUBECONFIG_FILENAME="${var.kubernetes_cluster_id}.yaml"
export NEW_KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME"
print_title "Install packages"
apt-get update
apt-get -y install curl s3cmd
apt-get -y install curl s3cmd parted
print_title "Prepare Rancher dedicated data disk"
disk_device="$(lsblk -r | grep disk | grep ${var.ec2_instance.disk_size_in_gb}G | tail -1 | awk '{ print $1 }')"
echo "disk_device: $disk_device"
disk_device_path="/dev/$disk_device"
echo "disk_device_path: $disk_device_path"
if [ $(lsblk -r | grep $disk_device | grep part | grep -c $disk_device) -eq 0 ] ; then
echo "No partition found, erasing disk"
parted -s -a optimal $disk_device_path mklabel gpt
parted -s -a optimal $disk_device_path mkpart primary ext4 0% 100%
partprobe $disk_device_path
sleep 5
export partition_path="/dev/$(lsblk -r | grep $disk_device | awk '/part/{print $1}')"
echo "partition_path: $partition_path"
mkfs.ext4 $partition_path
sleep 2
else
echo "Partition already exists, not erasing"
fi
echo "$partition_path /var/lib/rancher ext4 rw,discard 0 0" >> /etc/fstab
mkdir -p /var/lib/rancher
mount /var/lib/rancher
sleep 2
if [ $(df | grep -c '/var/lib/rancher') -eq 0 ] ; then
echo "No data disk was able to be mounted, can't continue"
exit 1
fi
print_title "Install k3s"
export INSTALL_K3S_VERSION=${var.k3s_config.version}
export INSTALL_K3S_CHANNEL=${var.k3s_config.channel}
export INSTALL_K3S_EXEC="--https-listen-port=${random_integer.kubernetes_external_port.result} ${var.k3s_config.exec}"
curl -sfL https://get.k3s.io | sh -
echo "k3s args: $INSTALL_K3S_EXEC"
curl -fL https://get.k3s.io | sh -
echo 'export KUBECONFIG=/etc/rancher/k3s/k3s.yaml' >> /etc/profile
while [ ! -f /etc/rancher/k3s/k3s.yaml ] ; do
@@ -85,6 +134,7 @@ while [ ! -f /etc/rancher/k3s/k3s.yaml ] ; do
sleep 1
done
print_title "Push Kubeconfig to S3"
# Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap
public_hostname="$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)"
sed "s/127.0.0.1/$public_hostname/g" /etc/rancher/k3s/k3s.yaml > $NEW_KUBECONFIG_PATH

View File

@@ -72,6 +72,7 @@ variable "ec2_instance" {
default = {
"instance_type" = "{{ eks_worker_nodes[0].instance_type }}"
"disk_size_in_gb" = "{{ eks_worker_nodes[0].disk_size_in_gib }}"
"user_data_logs_path" = "/var/log/user-data.log" # install error logs location
}
type = map(string)
}

View File

@@ -279,7 +279,7 @@ pub fn ec2_aws_helm_charts(
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
value: "100Mi".to_string(),
},
];
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, Some(cluster_agent_resources))?;
@@ -300,7 +300,7 @@ pub fn ec2_aws_helm_charts(
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
value: "100Mi".to_string(),
},
];
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, Some(shell_agent_resources))?;

View File

@@ -957,7 +957,7 @@ pub fn get_chart_for_cert_manager_config(
value: x.cloudflare_email.clone(),
})
}
DnsProviderConfiguration::QoveryDns(x) => {}
DnsProviderConfiguration::QoveryDns(_) => {}
};
cert_manager_config

View File

@@ -1266,6 +1266,13 @@ impl NodeGroups {
disk_size_in_gib,
})
}
/// Converts this node-group definition into an `InstanceEc2` spec,
/// reusing the node group's instance type and disk size.
pub fn to_ec2_instance(&self) -> InstanceEc2 {
    InstanceEc2 {
        instance_type: self.instance_type.clone(),
        disk_size_in_gib: self.disk_size_in_gib,
    }
}
}
impl InstanceEc2 {

View File

@@ -10,7 +10,6 @@ use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide;
use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials};
use qovery_engine::container_registry::ecr::ECR;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::engine::EngineConfig;
use qovery_engine::io_models::{Context, NoOpProgressListener};
use qovery_engine::logger::Logger;
@@ -18,8 +17,8 @@ use std::str::FromStr;
use std::sync::Arc;
use tracing::error;
use crate::cloudflare::dns_provider_cloudflare;
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain};
use crate::dns::{dns_provider_cloudflare, dns_provider_qoverydns};
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3;
@@ -88,7 +87,10 @@ impl Cluster<AWS, Options> for AWS {
// use AWS
let cloud_provider: Arc<Box<dyn CloudProvider>> = Arc::new(AWS::cloud_provider(context));
let dns_provider: Arc<Box<dyn DnsProvider>> = Arc::new(dns_provider_cloudflare(context, cluster_domain));
let dns_provider = match kubernetes_kind.clone() {
KubernetesKind::Ec2 => Arc::new(dns_provider_qoverydns(context, cluster_domain)),
_ => Arc::new(dns_provider_cloudflare(context, cluster_domain)),
};
let kubernetes = get_environment_test_kubernetes(
context,

View File

@@ -1,25 +0,0 @@
use crate::common::ClusterDomain;
use crate::utilities::FuncTestsSecrets;
use qovery_engine::dns_provider::cloudflare::Cloudflare;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::io_models::{Context, Domain};
/// Builds a Cloudflare-backed [`DnsProvider`] for functional tests.
///
/// The domain is either the caller-supplied custom domain, or
/// `<cluster_id>.<CLOUDFLARE_DOMAIN>` when the default variant is used.
pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box<dyn DnsProvider> {
    let secrets = FuncTestsSecrets::new();

    // Resolve the fully-qualified test domain first, then wrap it.
    let domain_name = match domain {
        ClusterDomain::Custom(domain) => domain.to_string(),
        ClusterDomain::Default { cluster_id } => {
            let root = secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set");
            format!("{}.{}", cluster_id, root)
        }
    };
    let domain = Domain::new(domain_name);

    let token = secrets.CLOUDFLARE_TOKEN.expect("CLOUDFLARE_TOKEN is not set"); // Cloudflare name: Qovery test
    let account_id = secrets.CLOUDFLARE_ID.expect("CLOUDFLARE_ID is not set");

    Box::new(Cloudflare::new(
        context.clone(),
        "qoverytestdnsclo",
        "Qovery Test Cloudflare",
        domain,
        token.as_str(),
        account_id.as_str(),
    ))
}

View File

@@ -11,8 +11,8 @@ use qovery_engine::engine::EngineConfig;
use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener};
use std::sync::Arc;
use crate::cloudflare::dns_provider_cloudflare;
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain};
use crate::dns::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
use qovery_engine::cloud_provider::qovery::EngineLocation;
use qovery_engine::dns_provider::DnsProvider;

56
test_utilities/src/dns.rs Normal file
View File

@@ -0,0 +1,56 @@
use crate::common::ClusterDomain;
use crate::utilities::FuncTestsSecrets;
use qovery_engine::dns_provider::cloudflare::Cloudflare;
use qovery_engine::dns_provider::qoverydns::QoveryDns;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::io_models::{Context, Domain};
/// Builds a Cloudflare-backed [`DnsProvider`] for functional tests.
///
/// The domain is either the caller-supplied custom domain, or
/// `<cluster_id>.<CLOUDFLARE_DOMAIN>` when the default variant is used.
pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box<dyn DnsProvider> {
    let secrets = FuncTestsSecrets::new();

    // Resolve the fully-qualified test domain first, then wrap it.
    let fqdn = match domain {
        ClusterDomain::Custom(domain) => domain.to_string(),
        ClusterDomain::Default { cluster_id } => {
            let root = secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set");
            format!("{}.{}", cluster_id, root)
        }
    };

    let token = secrets.CLOUDFLARE_TOKEN.expect("CLOUDFLARE_TOKEN is not set"); // Cloudflare name: Qovery test
    let account_id = secrets.CLOUDFLARE_ID.expect("CLOUDFLARE_ID is not set");

    Box::new(Cloudflare::new(
        context.clone(),
        "qoverytestdnsclo",
        "Qovery Test Cloudflare",
        Domain::new(fqdn),
        token.as_str(),
        account_id.as_str(),
    ))
}
/// Builds a QoveryDNS-backed [`DnsProvider`] for functional tests.
///
/// The domain is either the caller-supplied custom domain, or
/// `<cluster_id>.<QOVERY_DNS_DOMAIN>` when the default variant is used.
pub fn dns_provider_qoverydns(context: &Context, domain: &ClusterDomain) -> Box<dyn DnsProvider> {
    let secrets = FuncTestsSecrets::new();
    let domain = Domain::new(match domain {
        ClusterDomain::Custom(domain) => domain.to_string(),
        ClusterDomain::Default { cluster_id } => format!(
            "{}.{}",
            cluster_id,
            // FIX: was reading `secrets.CLOUDFLARE_DOMAIN` while the panic
            // message claimed QOVERYDNS_DOMAIN was missing; the QoveryDNS
            // provider must use its own domain secret.
            secrets.QOVERY_DNS_DOMAIN.expect("QOVERY_DNS_DOMAIN is not set")
        ),
    });
    Box::new(QoveryDns::new(
        context.clone(),
        "qoverytestdnsqdns",
        secrets
            .QOVERY_DNS_API_URL
            .expect("QOVERY_DNS_API_URL is not set")
            .as_str(),
        secrets
            .QOVERY_DNS_API_PORT
            .expect("QOVERY_DNS_API_PORT is not set")
            .as_str(),
        secrets
            .QOVERY_DNS_API_KEY
            .expect("QOVERY_DNS_API_KEY is not set")
            .as_str(),
        "Qovery Test QoveryDNS",
        domain,
    ))
}

View File

@@ -5,9 +5,9 @@ extern crate maplit;
pub mod aws;
pub mod aws_ec2;
pub mod cloudflare;
pub mod common;
pub mod digitalocean;
pub mod dns;
pub mod edge_aws_rs;
pub mod scaleway;
pub mod utilities;

View File

@@ -21,8 +21,8 @@ use qovery_engine::logger::Logger;
use qovery_engine::models::scaleway::ScwZone;
use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS};
use crate::cloudflare::dns_provider_cloudflare;
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain};
use crate::dns::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets};
pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2;

View File

@@ -149,6 +149,10 @@ pub struct FuncTestsSecrets {
pub QOVERY_GRPC_URL: Option<String>,
pub QOVERY_CLUSTER_SECRET_TOKEN: Option<String>,
pub QOVERY_CLUSTER_JWT_TOKEN: Option<String>,
pub QOVERY_DNS_API_URL: Option<String>,
pub QOVERY_DNS_API_PORT: Option<String>,
pub QOVERY_DNS_API_KEY: Option<String>,
pub QOVERY_DNS_DOMAIN: Option<String>,
}
struct VaultConfig {
@@ -240,6 +244,10 @@ impl FuncTestsSecrets {
QOVERY_GRPC_URL: None,
QOVERY_CLUSTER_SECRET_TOKEN: None,
QOVERY_CLUSTER_JWT_TOKEN: None,
QOVERY_DNS_API_URL: None,
QOVERY_DNS_API_PORT: None,
QOVERY_DNS_API_KEY: None,
QOVERY_DNS_DOMAIN: None,
};
let vault_config = match Self::get_vault_config() {
@@ -361,6 +369,10 @@ impl FuncTestsSecrets {
secrets.QOVERY_CLUSTER_SECRET_TOKEN,
),
QOVERY_CLUSTER_JWT_TOKEN: Self::select_secret("QOVERY_CLUSTER_JWT_TOKEN", secrets.QOVERY_CLUSTER_JWT_TOKEN),
QOVERY_DNS_API_URL: Self::select_secret("QOVERY_DNS_API_URL", secrets.QOVERY_DNS_API_URL),
QOVERY_DNS_API_PORT: Self::select_secret("QOVERY_DNS_API_PORT", secrets.QOVERY_DNS_API_PORT),
QOVERY_DNS_API_KEY: Self::select_secret("QOVERY_DNS_API_KEY", secrets.QOVERY_DNS_API_KEY),
QOVERY_DNS_DOMAIN: Self::select_secret("QOVERYDNS_DOMAIN", secrets.QOVERY_DNS_DOMAIN),
}
}
}

View File

@@ -1,6 +1,6 @@
extern crate test_utilities;
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger};
use self::test_utilities::utilities::{context, engine_run_test, generate_id, logger};
use ::function_name::named;
use qovery_engine::cloud_provider::kubernetes::Kind as KKind;
@@ -24,7 +24,7 @@ fn create_and_destroy_aws_ec2_k3s_cluster(
engine_run_test(|| {
let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region");
let zones = region.get_zones();
let cluster_id = generate_cluster_id(region.to_string().as_str());
let cluster_id = "ztest-abe3e22b0".to_string(); // don't change it to test qovery dns provider properly
cluster_test(
test_name,
Kind::Aws,

View File

@@ -1,4 +1,4 @@
use qovery_engine::cloud_provider::helm::{ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces};
use qovery_engine::cloud_provider::helm::{ChartInfo, ChartSetValue, CommonChart, HelmChartNamespaces};
use qovery_engine::cmd::helm::Helm;
use serde_derive::Deserialize;
@@ -81,6 +81,7 @@ pub struct Metadata2 {
pub self_link: String,
}
#[allow(dead_code)] // TODO(pmavro): fix this by using the correct tag
fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) {
let vault_secrets = FuncTestsSecrets::new();
let mut kube_config = dirs::home_dir().unwrap();