Feat/upgrade cluster tests (#483)

This commit is contained in:
MacLikorne
2021-11-24 11:26:18 +01:00
committed by GitHub
parent c3ea4024ad
commit a767761963
27 changed files with 2114 additions and 2005 deletions

21
Cargo.lock generated
View File

@@ -273,6 +273,26 @@ version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7"
[[package]]
name = "const_format"
version = "0.2.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22bc6cd49b0ec407b680c3e380182b6ac63b73991cb7602de350352fc309b614"
dependencies = [
"const_format_proc_macros",
]
[[package]]
name = "const_format_proc_macros"
version = "0.2.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef196d5d972878a48da7decb7686eded338b4858fbabeed513d63a7c98b2b82d"
dependencies = [
"proc-macro2 1.0.28",
"quote 1.0.9",
"unicode-xid 0.2.2",
]
[[package]]
name = "cookie"
version = "0.12.0"
@@ -3212,6 +3232,7 @@ dependencies = [
"base64 0.13.0",
"bstr",
"chrono",
"const_format",
"curl",
"digitalocean",
"dirs",

View File

@@ -13,12 +13,11 @@ use tera::Context as TeraContext;
use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType;
use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create;
use crate::cloud_provider::aws::AWS;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes,
KubernetesNodesType, KubernetesUpgradeStatus,
KubernetesNodesType, KubernetesUpgradeStatus, ProviderOptions,
};
use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat};
use crate::cloud_provider::qovery::EngineLocation;
@@ -108,6 +107,8 @@ pub struct Options {
pub tls_email_report: String,
}
// Marker impl (trait has no required methods visible here): allows the AWS EKS
// `Options` struct to be passed wherever a generic `ProviderOptions` is expected.
impl ProviderOptions for Options {}
pub struct EKS<'a> {
context: Context,
id: String,
@@ -115,7 +116,7 @@ pub struct EKS<'a> {
name: String,
version: String,
region: Region,
cloud_provider: &'a AWS,
cloud_provider: &'a dyn CloudProvider,
dns_provider: &'a dyn DnsProvider,
s3: S3,
nodes_groups: Vec<NodeGroups>,
@@ -132,7 +133,7 @@ impl<'a> EKS<'a> {
name: &str,
version: &str,
region: &str,
cloud_provider: &'a AWS,
cloud_provider: &'a dyn CloudProvider,
dns_provider: &'a dyn DnsProvider,
options: Options,
nodes_groups: Vec<NodeGroups>,
@@ -147,7 +148,8 @@ impl<'a> EKS<'a> {
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
node_group.instance_type,
cloud_provider.name()
)),
));
}
@@ -158,8 +160,8 @@ impl<'a> EKS<'a> {
context.clone(),
"s3-temp-id".to_string(),
"default-s3".to_string(),
cloud_provider.access_key_id.clone(),
cloud_provider.secret_access_key.clone(),
cloud_provider.access_key_id().clone(),
cloud_provider.secret_access_key().clone(),
);
Ok(EKS {
@@ -175,7 +177,7 @@ impl<'a> EKS<'a> {
options,
nodes_groups,
template_directory,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
listeners: cloud_provider.listeners().clone(), // copy listeners from CloudProvider
})
}
@@ -380,8 +382,8 @@ impl<'a> EKS<'a> {
context.insert("enable_cluster_autoscaler", &true);
// AWS
context.insert("aws_access_key", &self.cloud_provider.access_key_id);
context.insert("aws_secret_key", &self.cloud_provider.secret_access_key);
context.insert("aws_access_key", &self.cloud_provider.access_key_id());
context.insert("aws_secret_key", &self.cloud_provider.secret_access_key());
// AWS S3 tfstate storage
context.insert(
@@ -474,18 +476,11 @@ impl<'a> EKS<'a> {
fn create(&self) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
let send_to_customer = |message: &str| {
listeners_helper.deployment_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(message),
self.context.execution_id(),
))
};
send_to_customer(format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str());
self.send_to_customer(
format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(),
&listeners_helper,
);
// upgrade cluster instead if required
match self.config_file() {
@@ -514,8 +509,8 @@ impl<'a> EKS<'a> {
let already_created_roles = get_default_roles_to_create();
for role in already_created_roles {
match role.create_service_linked_role(
self.cloud_provider.access_key_id.as_str(),
self.cloud_provider.secret_access_key.as_str(),
self.cloud_provider.access_key_id().as_str(),
self.cloud_provider.secret_access_key().as_str(),
) {
Ok(_) => info!("Role {} is already present, no need to create", role.role_name),
Err(e) => error!(
@@ -557,7 +552,10 @@ impl<'a> EKS<'a> {
),
)?;
send_to_customer(format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str());
self.send_to_customer(
format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(),
&listeners_helper,
);
// temporary: remove helm/kube management from terraform
match terraform_init_validate_state_list(temp_dir.as_str()) {
@@ -625,7 +623,7 @@ impl<'a> EKS<'a> {
.collect();
let charts_prerequisites = ChartsConfigPrerequisites {
organization_id: self.cloud_provider.organization_id().to_string(),
organization_long_id: self.cloud_provider.organization_long_id,
organization_long_id: self.cloud_provider.organization_long_id(),
infra_options: self.options.clone(),
cluster_id: self.id.clone(),
cluster_long_id: self.long_id,
@@ -633,8 +631,8 @@ impl<'a> EKS<'a> {
cluster_name: self.cluster_name().to_string(),
cloud_provider: "aws".to_string(),
test_cluster: self.context.is_test_cluster(),
aws_access_key_id: self.cloud_provider.access_key_id.to_string(),
aws_secret_access_key: self.cloud_provider.secret_access_key.to_string(),
aws_access_key_id: self.cloud_provider.access_key_id().to_string(),
aws_secret_access_key: self.cloud_provider.secret_access_key().to_string(),
vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(),
qovery_engine_location: self.get_engine_location(),
ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory),
@@ -695,275 +693,6 @@ impl<'a> EKS<'a> {
))
}
fn upgrade(&self) -> Result<(), EngineError> {
let kubeconfig = match self.config_file() {
Ok(f) => f.0,
Err(e) => return Err(e),
};
match is_kubernetes_upgrade_required(
kubeconfig,
&self.version,
self.cloud_provider.credentials_environment_variables(),
) {
Ok(x) => self.upgrade_with_status(x),
Err(e) => {
let msg = format!(
"Error detected, upgrade won't occurs, but standard deployment. {:?}",
e.message
);
error!("{}", &msg);
Err(EngineError {
cause: EngineErrorCause::Internal,
scope: EngineErrorScope::Engine,
execution_id: self.context.execution_id().to_string(),
message: Some(msg),
})
}
}
}
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
let send_to_customer = |message: &str| {
listeners_helper.upgrade_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(message),
self.context.execution_id(),
))
};
send_to_customer(
format!(
"Start preparing EKS upgrade process {} cluster with id {}",
self.name(),
self.id()
)
.as_str(),
);
let temp_dir = workspace_directory(
self.context.workspace_root_dir(),
self.context.execution_id(),
format!("bootstrap/{}", self.id()),
)
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?;
let kubeconfig = match self.config_file() {
Ok(f) => f.0,
Err(e) => {
error!("Can't perform a Kubernetes upgrade, can't locate kubeconfig");
return Err(e);
}
};
// generate terraform files and copy them into temp dir
let mut context = self.tera_context()?;
//
// Upgrade master nodes
//
match &kubernetes_upgrade_status.required_upgrade_on {
Some(KubernetesNodesType::Masters) => {
let message = format!(
"Start upgrading process for master nodes on {}/{}",
self.name(),
self.id()
);
info!("{}", &message);
send_to_customer(&message);
// AWS requires the upgrade to be done in 2 steps (masters, then workers)
// use the current kubernetes masters' version for workers, in order to avoid migration in one step
context.insert(
"kubernetes_master_version",
format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
);
// use the current master version for workers, they will be updated later
context.insert(
"eks_workers_version",
format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(),
);
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
self.template_directory.as_str(),
temp_dir.as_str(),
&context,
),
)?;
let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::copy_non_template_files(
format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
common_charts_temp_dir.as_str(),
),
)?;
send_to_customer(format!("Upgrading Kubernetes {} master nodes", self.name()).as_str());
match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
) {
Ok(_) => {
let message = format!(
"Kubernetes {} master nodes have been successfully upgraded",
self.name()
);
info!("{}", &message);
send_to_customer(&message);
}
Err(e) => {
error!(
"Error while upgrading master nodes for cluster {} with id {}.",
self.name(),
self.id()
);
return Err(e);
}
}
}
Some(KubernetesNodesType::Workers) => {
info!("No need to perform Kubernetes master upgrade, they are already up to date")
}
None => {
info!("No Kubernetes upgrade required, masters and workers are already up to date");
return Ok(());
}
}
//
// Upgrade worker nodes
//
let message = format!(
"Preparing workers nodes for upgrade for Kubernetes cluster {}",
self.name()
);
info!("{}", &message);
send_to_customer(message.as_str());
// disable cluster autoscaler to avoid interfering with AWS upgrade procedure
context.insert("enable_cluster_autoscaler", &false);
context.insert(
"eks_workers_version",
format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
);
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
self.template_directory.as_str(),
temp_dir.as_str(),
&context,
),
)?;
// copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory.
// this is due to the required dependencies of lib/aws/bootstrap/*.tf files
let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::copy_non_template_files(
format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
common_charts_temp_dir.as_str(),
),
)?;
let message = format!("Upgrading Kubernetes {} worker nodes", self.name());
info!("{}", &message);
send_to_customer(message.as_str());
// disable cluster autoscaler deployment
info!("down-scaling cluster autoscaler to 0");
match kubectl_exec_scale_replicas(
&kubeconfig,
self.cloud_provider().credentials_environment_variables(),
"kube-system",
ScalingKind::Deployment,
"cluster-autoscaler-aws-cluster-autoscaler",
0,
) {
Ok(_) => {}
Err(e) => {
return Err(EngineError {
cause: EngineErrorCause::Internal,
scope: EngineErrorScope::Engine,
execution_id: self.context.execution_id().to_string(),
message: e.message,
})
}
};
match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
) {
Ok(_) => {
let message = format!(
"Kubernetes {} workers nodes have been successfully upgraded",
self.name()
);
info!("{}", &message);
send_to_customer(&message);
}
Err(e) => {
// enable cluster autoscaler deployment
info!("up-scaling cluster autoscaler to 1");
let _ = kubectl_exec_scale_replicas(
&kubeconfig,
self.cloud_provider().credentials_environment_variables(),
"kube-system",
ScalingKind::Deployment,
"cluster-autoscaler-aws-cluster-autoscaler",
1,
);
error!(
"Error while upgrading master nodes for cluster {} with id {}.",
self.name(),
self.id()
);
return Err(e);
}
}
// enable cluster autoscaler deployment
info!("up-scaling cluster autoscaler to 1");
match kubectl_exec_scale_replicas(
&kubeconfig,
self.cloud_provider().credentials_environment_variables(),
"kube-system",
ScalingKind::Deployment,
"cluster-autoscaler-aws-cluster-autoscaler",
1,
) {
Ok(_) => {}
Err(e) => {
return Err(EngineError {
cause: EngineErrorCause::Internal,
scope: EngineErrorScope::Engine,
execution_id: self.context.execution_id().to_string(),
message: e.message,
})
}
};
Ok(())
}
fn upgrade_error(&self) -> Result<(), EngineError> {
Ok(())
}
@@ -1171,24 +900,15 @@ impl<'a> EKS<'a> {
fn delete(&self) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
let mut skip_kubernetes_step = false;
let send_to_customer = |message: &str| {
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(message),
self.context.execution_id(),
))
};
send_to_customer(format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str());
self.send_to_customer(
format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str(),
&listeners_helper,
);
let temp_dir = workspace_directory(
self.context.workspace_root_dir(),
self.context.execution_id(),
format!("bootstrap/{}", self.id()),
)
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?;
let temp_dir = match self.get_temp_dir() {
Ok(dir) => dir,
Err(e) => return Err(e),
};
// generate terraform files and copy them into temp dir
let context = self.tera_context()?;
@@ -1236,7 +956,7 @@ impl<'a> EKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
info!("Running Terraform apply before running a delete");
if let Err(e) = cast_simple_error_to_engine_error(
@@ -1255,7 +975,7 @@ impl<'a> EKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
let all_namespaces = kubectl_exec_get_all_namespaces(
&kubernetes_config_file_path,
@@ -1302,7 +1022,7 @@ impl<'a> EKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
// delete custom metrics api to avoid stale namespaces on deletion
let _ = cmd::helm::helm_uninstall_list(
@@ -1410,7 +1130,7 @@ impl<'a> EKS<'a> {
let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id());
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
info!("Running Terraform destroy");
let terraform_result =
@@ -1430,7 +1150,7 @@ impl<'a> EKS<'a> {
Ok(_) => {
let message = format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id());
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
Ok(())
}
Err(Operation { error, .. }) => Err(error),
@@ -1529,6 +1249,234 @@ impl<'a> Kubernetes for EKS<'a> {
send_progress_on_long_task(self, Action::Create, || self.create_error())
}
/// Upgrades the EKS cluster according to `kubernetes_upgrade_status`.
///
/// AWS requires the upgrade to be performed in two steps (two terraform
/// applies): masters are upgraded first while workers stay pinned to the
/// currently deployed masters version, then workers are upgraded to the
/// requested version. Before the workers apply, the cluster-autoscaler
/// deployment is scaled down to 0 so it cannot interfere with the AWS node
/// rotation; it is scaled back to 1 afterwards, including on the error path.
///
/// Returns `Ok(())` when no upgrade is required or when the upgrade
/// completed; returns an `EngineError` on any terraform or kubectl failure.
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
    let listeners_helper = ListenersHelper::new(&self.listeners);

    self.send_to_customer(
        format!(
            "Start preparing EKS upgrade process {} cluster with id {}",
            self.name(),
            self.id()
        )
        .as_str(),
        &listeners_helper,
    );

    let temp_dir = match self.get_temp_dir() {
        Ok(dir) => dir,
        Err(e) => return Err(e),
    };

    let kubeconfig = match self.get_kubeconfig() {
        Ok(path) => path,
        Err(e) => return Err(e),
    };

    // generate terraform files and copy them into temp dir
    let mut context = self.tera_context()?;

    //
    // Upgrade master nodes
    //
    match &kubernetes_upgrade_status.required_upgrade_on {
        Some(KubernetesNodesType::Masters) => {
            let message = format!(
                "Start upgrading process for master nodes on {}/{}",
                self.name(),
                self.id()
            );
            info!("{}", &message);
            self.send_to_customer(&message, &listeners_helper);

            // AWS requires the upgrade to be done in 2 steps (masters, then workers)
            // use the current kubernetes masters' version for workers, in order to avoid migration in one step
            context.insert(
                "kubernetes_master_version",
                format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
            );
            // use the current master version for workers, they will be updated later
            context.insert(
                "eks_workers_version",
                format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(),
            );

            let _ = cast_simple_error_to_engine_error(
                self.engine_error_scope(),
                self.context.execution_id(),
                crate::template::generate_and_copy_all_files_into_dir(
                    self.template_directory.as_str(),
                    temp_dir.as_str(),
                    &context,
                ),
            )?;

            // copy lib/common/bootstrap/charts into the workspace: required by the
            // dependencies of lib/aws/bootstrap/*.tf files
            let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
            let _ = cast_simple_error_to_engine_error(
                self.engine_error_scope(),
                self.context.execution_id(),
                crate::template::copy_non_template_files(
                    format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
                    common_charts_temp_dir.as_str(),
                ),
            )?;

            self.send_to_customer(
                format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(),
                &listeners_helper,
            );

            match cast_simple_error_to_engine_error(
                self.engine_error_scope(),
                self.context.execution_id(),
                terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
            ) {
                Ok(_) => {
                    let message = format!(
                        "Kubernetes {} master nodes have been successfully upgraded",
                        self.name()
                    );
                    info!("{}", &message);
                    self.send_to_customer(&message, &listeners_helper);
                }
                Err(e) => {
                    error!(
                        "Error while upgrading master nodes for cluster {} with id {}.",
                        self.name(),
                        self.id()
                    );
                    return Err(e);
                }
            }
        }
        Some(KubernetesNodesType::Workers) => {
            info!("No need to perform Kubernetes master upgrade, they are already up to date")
        }
        None => {
            info!("No Kubernetes upgrade required, masters and workers are already up to date");
            return Ok(());
        }
    }

    //
    // Upgrade worker nodes
    //
    let message = format!(
        "Preparing workers nodes for upgrade for Kubernetes cluster {}",
        self.name()
    );
    info!("{}", &message);
    self.send_to_customer(message.as_str(), &listeners_helper);

    // disable cluster autoscaler to avoid interfering with AWS upgrade procedure
    context.insert("enable_cluster_autoscaler", &false);
    context.insert(
        "eks_workers_version",
        format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
    );

    let _ = cast_simple_error_to_engine_error(
        self.engine_error_scope(),
        self.context.execution_id(),
        crate::template::generate_and_copy_all_files_into_dir(
            self.template_directory.as_str(),
            temp_dir.as_str(),
            &context,
        ),
    )?;

    // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory.
    // this is due to the required dependencies of lib/aws/bootstrap/*.tf files
    let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
    let _ = cast_simple_error_to_engine_error(
        self.engine_error_scope(),
        self.context.execution_id(),
        crate::template::copy_non_template_files(
            format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
            common_charts_temp_dir.as_str(),
        ),
    )?;

    let message = format!("Upgrading Kubernetes {} worker nodes", self.name());
    info!("{}", &message);
    self.send_to_customer(message.as_str(), &listeners_helper);

    // disable cluster autoscaler deployment
    info!("down-scaling cluster autoscaler to 0");
    match kubectl_exec_scale_replicas(
        &kubeconfig,
        self.cloud_provider().credentials_environment_variables(),
        "kube-system",
        ScalingKind::Deployment,
        "cluster-autoscaler-aws-cluster-autoscaler",
        0,
    ) {
        Ok(_) => {}
        Err(e) => {
            return Err(EngineError {
                cause: EngineErrorCause::Internal,
                scope: EngineErrorScope::Engine,
                execution_id: self.context.execution_id().to_string(),
                message: e.message,
            })
        }
    };

    match cast_simple_error_to_engine_error(
        self.engine_error_scope(),
        self.context.execution_id(),
        terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
    ) {
        Ok(_) => {
            let message = format!(
                "Kubernetes {} workers nodes have been successfully upgraded",
                self.name()
            );
            info!("{}", &message);
            self.send_to_customer(&message, &listeners_helper);
        }
        Err(e) => {
            // re-enable cluster autoscaler deployment before bailing out
            // (best-effort: a failure here is ignored, the terraform error is
            // the one reported to the caller)
            info!("up-scaling cluster autoscaler to 1");
            let _ = kubectl_exec_scale_replicas(
                &kubeconfig,
                self.cloud_provider().credentials_environment_variables(),
                "kube-system",
                ScalingKind::Deployment,
                "cluster-autoscaler-aws-cluster-autoscaler",
                1,
            );
            // fix: this failure happens during the WORKER nodes upgrade
            // (previous message wrongly said "master nodes")
            error!(
                "Error while upgrading worker nodes for cluster {} with id {}.",
                self.name(),
                self.id()
            );
            return Err(e);
        }
    }

    // enable cluster autoscaler deployment
    info!("up-scaling cluster autoscaler to 1");
    match kubectl_exec_scale_replicas(
        &kubeconfig,
        self.cloud_provider().credentials_environment_variables(),
        "kube-system",
        ScalingKind::Deployment,
        "cluster-autoscaler-aws-cluster-autoscaler",
        1,
    ) {
        Ok(_) => {}
        Err(e) => {
            return Err(EngineError {
                cause: EngineErrorCause::Internal,
                scope: EngineErrorScope::Engine,
                execution_id: self.context.execution_id().to_string(),
                message: e.message,
            })
        }
    };

    Ok(())
}
#[named]
fn on_upgrade(&self) -> Result<(), EngineError> {
print_action(

View File

@@ -3,6 +3,7 @@ use std::any::Any;
use rusoto_core::{Client, HttpClient, Region};
use rusoto_credential::StaticProvider;
use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient};
use uuid::Uuid;
use crate::cloud_provider::{CloudProvider, EngineError, Kind, TerraformStateCredentials};
use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY};
@@ -82,10 +83,26 @@ impl CloudProvider for AWS {
self.organization_id.as_str()
}
/// Returns the long (UUID) identifier of the organization owning this
/// cloud-provider account (copy of the stored field; `Uuid` is `Copy`).
fn organization_long_id(&self) -> Uuid {
    self.organization_long_id
}
/// Returns the display name of this cloud provider instance as a borrowed str.
fn name(&self) -> &str {
    self.name.as_str()
}
/// Returns an owned copy of the AWS access key id (allocates a new String
/// on every call).
fn access_key_id(&self) -> String {
    self.access_key_id.to_string()
}
/// Returns an owned copy of the AWS secret access key (allocates a new
/// String on every call).
fn secret_access_key(&self) -> String {
    self.secret_access_key.to_string()
}
/// Provider API token. AWS authenticates with access key / secret key
/// instead, so this is intentionally unimplemented.
/// NOTE(review): calling this panics (`todo!()`) — confirm no code path
/// reaches `token()` for the AWS provider.
fn token(&self) -> &str {
    todo!()
}
fn is_valid(&self) -> Result<(), EngineError> {
let client = StsClient::new_with_client(self.client(), Region::default());
let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default()));

View File

@@ -15,10 +15,11 @@ use crate::cloud_provider::digitalocean::network::load_balancer::do_get_load_bal
use crate::cloud_provider::digitalocean::network::vpc::{
get_do_random_available_subnet_from_api, get_do_vpc_name_available_from_api, VpcInitKind,
};
use crate::cloud_provider::digitalocean::DO;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo, ChartSetValue, HelmChartNamespaces};
use crate::cloud_provider::kubernetes::{send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes};
use crate::cloud_provider::kubernetes::{
send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes, KubernetesUpgradeStatus, ProviderOptions,
};
use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::utilities::print_action;
@@ -78,6 +79,8 @@ pub struct DoksOptions {
pub tls_email_report: String,
}
// Marker impl (trait has no required methods visible here): allows the DigitalOcean
// `DoksOptions` struct to be passed wherever a generic `ProviderOptions` is expected.
impl ProviderOptions for DoksOptions {}
pub struct DOKS<'a> {
context: Context,
id: String,
@@ -85,7 +88,7 @@ pub struct DOKS<'a> {
name: String,
version: String,
region: Region,
cloud_provider: &'a DO,
cloud_provider: &'a dyn CloudProvider,
nodes_groups: Vec<NodeGroups>,
dns_provider: &'a dyn DnsProvider,
spaces: Spaces,
@@ -102,7 +105,7 @@ impl<'a> DOKS<'a> {
name: String,
version: String,
region: Region,
cloud_provider: &'a DO,
cloud_provider: &'a dyn CloudProvider,
dns_provider: &'a dyn DnsProvider,
nodes_groups: Vec<NodeGroups>,
options: DoksOptions,
@@ -117,7 +120,8 @@ impl<'a> DOKS<'a> {
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
node_group.instance_type,
cloud_provider.name()
)),
));
}
@@ -127,8 +131,8 @@ impl<'a> DOKS<'a> {
context.clone(),
"spaces-temp-id".to_string(),
"my-spaces-object-storage".to_string(),
cloud_provider.spaces_access_id.clone(),
cloud_provider.spaces_secret_key.clone(),
cloud_provider.access_key_id().clone(),
cloud_provider.secret_access_key().clone(),
region,
BucketDeleteStrategy::HardDelete,
);
@@ -146,7 +150,7 @@ impl<'a> DOKS<'a> {
options,
nodes_groups,
template_directory,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
listeners: cloud_provider.listeners().clone(), // copy listeners from CloudProvider
})
}
@@ -167,12 +171,12 @@ impl<'a> DOKS<'a> {
let mut context = TeraContext::new();
// Digital Ocean
context.insert("digitalocean_token", &self.cloud_provider.token);
context.insert("digitalocean_token", &self.cloud_provider.token());
context.insert("do_region", &self.region.to_string());
// Digital Ocean: Spaces Credentials
context.insert("spaces_access_id", &self.cloud_provider.spaces_access_id);
context.insert("spaces_secret_key", &self.cloud_provider.spaces_secret_key);
context.insert("spaces_access_id", &self.cloud_provider.access_key_id());
context.insert("spaces_secret_key", &self.cloud_provider.secret_access_key());
let space_kubeconfig_bucket = format!("qovery-kubeconfigs-{}", self.id.as_str());
context.insert("space_bucket_kubeconfig", &space_kubeconfig_bucket);
@@ -182,11 +186,11 @@ impl<'a> DOKS<'a> {
let vpc_cidr_block = match self.options.vpc_cidr_set {
// VPC subnet is not set, getting a non used subnet
VpcInitKind::Autodetect => {
match get_do_vpc_name_available_from_api(&self.cloud_provider.token, self.options.vpc_name.clone()) {
match get_do_vpc_name_available_from_api(self.cloud_provider.token(), self.options.vpc_name.clone()) {
Ok(vpcs) => match vpcs {
// new vpc: select a random non used subnet
None => {
match get_do_random_available_subnet_from_api(&self.cloud_provider.token, self.region) {
match get_do_random_available_subnet_from_api(&self.cloud_provider.token(), self.region) {
Ok(x) => x,
Err(e) => {
return Err(EngineError {
@@ -251,7 +255,7 @@ impl<'a> DOKS<'a> {
let doks_version = match self.get_doks_info_from_name_api() {
Ok(x) => match x {
// new cluster, we check the wished version is supported by DO
None => match get_do_latest_doks_slug_from_api(self.cloud_provider.token.as_str(), self.version()) {
None => match get_do_latest_doks_slug_from_api(self.cloud_provider.token(), self.version()) {
Ok(version) => match version {
None => return Err(EngineError {
cause: EngineErrorCause::Internal,
@@ -407,7 +411,7 @@ impl<'a> DOKS<'a> {
fn do_loadbalancer_hostname(&self) -> String {
format!(
"qovery-nginx-{}.{}",
self.cloud_provider.id,
self.cloud_provider.id(),
self.dns_provider().domain()
)
}
@@ -423,7 +427,7 @@ impl<'a> DOKS<'a> {
// return cluster info from name if exists
fn get_doks_info_from_name_api(&self) -> Result<Option<KubernetesCluster>, SimpleError> {
let api_url = format!("{}/clusters", DoApiType::Doks.api_url());
let json_content = do_get_from_api(self.cloud_provider.token.as_str(), DoApiType::Doks, api_url)?;
let json_content = do_get_from_api(self.cloud_provider.token(), DoApiType::Doks, api_url)?;
// TODO(benjaminch): `qovery-` to be added into Rust name directly everywhere
get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id().to_string()))
}
@@ -614,7 +618,7 @@ impl<'a> DOKS<'a> {
let charts_prerequisites = ChartsConfigPrerequisites {
organization_id: self.cloud_provider.organization_id().to_string(),
organization_long_id: self.cloud_provider.organization_long_id,
organization_long_id: self.cloud_provider.organization_long_id(),
infra_options: self.options.clone(),
cluster_id: self.id.clone(),
cluster_long_id: self.long_id,
@@ -623,9 +627,9 @@ impl<'a> DOKS<'a> {
cluster_name: self.cluster_name().to_string(),
cloud_provider: "digitalocean".to_string(),
test_cluster: self.context.is_test_cluster(),
do_token: self.cloud_provider.token.to_string(),
do_space_access_id: self.cloud_provider.spaces_access_id.to_string(),
do_space_secret_key: self.cloud_provider.spaces_secret_key.to_string(),
do_token: self.cloud_provider.token().to_string(),
do_space_access_id: self.cloud_provider.access_key_id().to_string(),
do_space_secret_key: self.cloud_provider.secret_access_key().to_string(),
do_space_bucket_kubeconfig: self.kubeconfig_bucket_name(),
do_space_kubeconfig_filename: self.kubeconfig_file_name(),
qovery_engine_location: self.options.qovery_engine_location.clone(),
@@ -696,7 +700,7 @@ impl<'a> DOKS<'a> {
})
}
};
let nginx_ingress_loadbalancer_ip = match do_get_load_balancer_ip(&self.cloud_provider.token, nginx_ingress_loadbalancer_id.as_str()) {
let nginx_ingress_loadbalancer_ip = match do_get_load_balancer_ip(self.cloud_provider.token(), nginx_ingress_loadbalancer_id.as_str()) {
Ok(x) => x.to_string(),
Err(e) => {
return Err(EngineError {
@@ -778,10 +782,6 @@ impl<'a> DOKS<'a> {
))
}
fn upgrade(&self) -> Result<(), EngineError> {
Ok(())
}
fn upgrade_error(&self) -> Result<(), EngineError> {
Ok(())
}
@@ -805,24 +805,15 @@ impl<'a> DOKS<'a> {
fn delete(&self) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
let mut skip_kubernetes_step = false;
let send_to_customer = |message: &str| {
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(message),
self.context.execution_id(),
))
};
send_to_customer(format!("Preparing to delete DOKS cluster {} with id {}", self.name(), self.id()).as_str());
self.send_to_customer(
format!("Preparing to delete DOKS cluster {} with id {}", self.name(), self.id()).as_str(),
&listeners_helper,
);
let temp_dir = workspace_directory(
self.context.workspace_root_dir(),
self.context.execution_id(),
format!("bootstrap/{}", self.id()),
)
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?;
let temp_dir = match self.get_temp_dir() {
Ok(dir) => dir,
Err(e) => return Err(e),
};
// generate terraform files and copy them into temp dir
let context = self.tera_context()?;
@@ -870,7 +861,7 @@ impl<'a> DOKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
info!("Running Terraform apply before running a delete");
if let Err(e) = cast_simple_error_to_engine_error(
@@ -889,7 +880,7 @@ impl<'a> DOKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
let all_namespaces = kubectl_exec_get_all_namespaces(
&kubernetes_config_file_path,
@@ -936,7 +927,7 @@ impl<'a> DOKS<'a> {
self.id()
);
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
// delete custom metrics api to avoid stale namespaces on deletion
let _ = cmd::helm::helm_uninstall_list(
@@ -1041,7 +1032,7 @@ impl<'a> DOKS<'a> {
let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id());
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
info!("Running Terraform destroy");
let terraform_result =
@@ -1061,7 +1052,7 @@ impl<'a> DOKS<'a> {
Ok(_) => {
let message = format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id());
info!("{}", &message);
send_to_customer(&message);
self.send_to_customer(&message, &listeners_helper);
}
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(msg)) => {
@@ -1162,6 +1153,86 @@ impl<'a> Kubernetes for DOKS<'a> {
send_progress_on_long_task(self, Action::Create, || self.create_error())
}
/// Upgrades the DOKS cluster nodes to `kubernetes_upgrade_status.requested_version`
/// by re-rendering the terraform templates with the new version and applying them.
///
/// Progress messages are pushed to listeners via `listeners_helper`; any
/// templating or terraform failure is converted into an `EngineError`.
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
self.send_to_customer(
format!(
"Start preparing DOKS upgrade process {} cluster with id {}",
self.name(),
self.id()
)
.as_str(),
&listeners_helper,
);
// workspace directory where rendered terraform files will be written
let temp_dir = match self.get_temp_dir() {
Ok(dir) => dir,
Err(e) => return Err(e),
};
// generate terraform files and copy them into temp dir
let mut context = self.tera_context()?;
//
// Upgrade nodes
//
let message = format!("Start upgrading process for nodes on {}/{}", self.name(), self.id());
info!("{}", &message);
self.send_to_customer(&message, &listeners_helper);
// inject the requested version so the rendered terraform targets it
context.insert(
"doks_version",
format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
);
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
self.template_directory.as_str(),
temp_dir.as_str(),
&context,
),
)?;
// helm charts are copied verbatim (no templating applied)
let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::copy_non_template_files(
format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
common_charts_temp_dir.as_str(),
),
)?;
self.send_to_customer(
format!("Upgrading Kubernetes {} nodes", self.name()).as_str(),
&listeners_helper,
);
// terraform init/validate/plan/apply performs the actual node upgrade
// (plan only when dry-run is enabled)
match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
) {
Ok(_) => {
let message = format!("Kubernetes {} nodes have been successfully upgraded", self.name());
info!("{}", &message);
self.send_to_customer(&message, &listeners_helper);
}
Err(e) => {
error!(
"Error while upgrading nodes for cluster {} with id {}.",
self.name(),
self.id()
);
return Err(e);
}
}
Ok(())
}
#[named]
fn on_upgrade(&self) -> Result<(), EngineError> {
print_action(

View File

@@ -3,6 +3,7 @@ extern crate digitalocean;
use std::any::Any;
use digitalocean::DigitalOcean;
use uuid::Uuid;
use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials};
use crate::constants::DIGITAL_OCEAN_TOKEN;
@@ -78,10 +79,26 @@ impl CloudProvider for DO {
self.organization_id.as_str()
}
/// Stable organization UUID attached to this DigitalOcean account.
fn organization_long_id(&self) -> Uuid {
self.organization_long_id
}
/// Human-readable provider name.
fn name(&self) -> &str {
self.name.as_str()
}
/// Spaces (object storage) access key id, returned as an owned String.
fn access_key_id(&self) -> String {
self.spaces_access_id.to_string()
}
/// Spaces (object storage) secret key, returned as an owned String.
fn secret_access_key(&self) -> String {
self.spaces_secret_key.to_string()
}
/// DigitalOcean API token.
fn token(&self) -> &str {
self.token.as_str()
}
fn is_valid(&self) -> Result<(), EngineError> {
let client = DigitalOcean::new(&self.token);
match client {

View File

@@ -28,11 +28,14 @@ use crate::error::SimpleErrorKind::Other;
use crate::error::{
cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope, SimpleError, SimpleErrorKind,
};
use crate::fs::workspace_directory;
use crate::models::ProgressLevel::Info;
use crate::models::{Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, StringPath};
use crate::object_storage::ObjectStorage;
use crate::unit_conversion::{any_to_mi, cpu_string_to_float};
pub trait ProviderOptions {}
pub trait Kubernetes: Listen {
fn context(&self) -> &Context;
fn kind(&self) -> Kind;
@@ -118,6 +121,34 @@ pub trait Kubernetes: Listen {
}
fn on_create(&self) -> Result<(), EngineError>;
fn on_create_error(&self) -> Result<(), EngineError>;
/// Default upgrade entry point: reads the cluster's kubeconfig, checks whether
/// an upgrade to `self.version()` is required, and if so delegates to
/// `upgrade_with_status`.
///
/// If the upgrade-required check itself fails, no upgrade is attempted and an
/// internal `EngineError` carrying the underlying message is returned.
fn upgrade(&self) -> Result<(), EngineError> {
let kubeconfig = match self.config_file() {
Ok(f) => f.0,
Err(e) => return Err(e),
};
match is_kubernetes_upgrade_required(
kubeconfig,
&self.version(),
self.cloud_provider().credentials_environment_variables(),
) {
Ok(x) => self.upgrade_with_status(x),
Err(e) => {
let msg = format!(
"Error detected, upgrade won't occurs, but standard deployment. {:?}",
e.message
);
error!("{}", &msg);
Err(EngineError {
cause: EngineErrorCause::Internal,
scope: EngineErrorScope::Engine,
execution_id: self.context().execution_id().to_string(),
message: Some(msg),
})
}
}
}
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError>;
fn on_upgrade(&self) -> Result<(), EngineError>;
fn on_upgrade_error(&self) -> Result<(), EngineError>;
fn on_downgrade(&self) -> Result<(), EngineError>;
@@ -143,6 +174,35 @@ pub trait Kubernetes: Listen {
Some(message),
)
}
/// Forwards a progress message to all listeners as an infrastructure-scoped
/// event tagged with the current execution id.
///
/// NOTE(review): events are always reported via `upgrade_in_progress`, even
/// when this helper is called from create/delete paths — confirm intended.
fn send_to_customer(&self, message: &str, listeners_helper: &ListenersHelper) {
listeners_helper.upgrade_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context().execution_id().to_string(),
},
ProgressLevel::Info,
Some(message),
self.context().execution_id(),
))
}
/// Resolves (creating if needed) the `bootstrap/<cluster id>` workspace
/// directory under the context's workspace root, mapping any filesystem
/// failure to an internal `EngineError`.
fn get_temp_dir(&self) -> Result<String, EngineError> {
    let ctx = self.context();
    let bootstrap_subdir = format!("bootstrap/{}", self.id());
    match workspace_directory(ctx.workspace_root_dir(), ctx.execution_id(), bootstrap_subdir) {
        Ok(dir) => Ok(dir),
        Err(err) => Err(self.engine_error(EngineErrorCause::Internal, err.to_string())),
    }
}
/// Returns the path of this cluster's kubeconfig file, logging an error
/// before propagating any failure from `config_file()`.
fn get_kubeconfig(&self) -> Result<StringPath, EngineError> {
    match self.config_file() {
        Ok(f) => Ok(f.0),
        Err(e) => {
            error!("Can't perform a Kubernetes upgrade, can't locate kubeconfig");
            Err(e)
        }
    }
}
}
pub trait KubernetesNode {

View File

@@ -24,10 +24,14 @@ pub trait CloudProvider: Listen {
fn kind(&self) -> Kind;
fn id(&self) -> &str;
fn organization_id(&self) -> &str;
fn organization_long_id(&self) -> uuid::Uuid;
fn name(&self) -> &str;
fn name_with_id(&self) -> String {
format!("{} ({})", self.name(), self.id())
}
fn access_key_id(&self) -> String;
fn secret_access_key(&self) -> String;
fn token(&self) -> &str;
fn is_valid(&self) -> Result<(), EngineError>;
/// environment variables containing credentials
fn credentials_environment_variables(&self) -> Vec<(&str, &str)>;

View File

@@ -5,14 +5,13 @@ use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes,
KubernetesUpgradeStatus,
KubernetesUpgradeStatus, ProviderOptions,
};
use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::scaleway::application::Zone;
use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::scaleway::kubernetes::node::ScwInstancesType;
use crate::cloud_provider::scaleway::Scaleway;
use crate::cloud_provider::utilities::print_action;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd::kubectl::{kubectl_exec_get_all_namespaces, kubectl_exec_get_events};
@@ -65,6 +64,8 @@ pub struct KapsuleOptions {
pub tls_email_report: String,
}
impl ProviderOptions for KapsuleOptions {}
impl KapsuleOptions {
pub fn new(
qovery_api_url: String,
@@ -112,7 +113,7 @@ pub struct Kapsule<'a> {
name: String,
version: String,
zone: Zone,
cloud_provider: &'a Scaleway,
cloud_provider: &'a dyn CloudProvider,
dns_provider: &'a dyn DnsProvider,
object_storage: ScalewayOS,
nodes_groups: Vec<NodeGroups>,
@@ -129,7 +130,7 @@ impl<'a> Kapsule<'a> {
name: String,
version: String,
zone: Zone,
cloud_provider: &'a Scaleway,
cloud_provider: &'a dyn CloudProvider,
dns_provider: &'a dyn DnsProvider,
nodes_groups: Vec<NodeGroups>,
options: KapsuleOptions,
@@ -144,7 +145,8 @@ impl<'a> Kapsule<'a> {
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
node_group.instance_type,
cloud_provider.name()
)),
));
}
@@ -154,8 +156,8 @@ impl<'a> Kapsule<'a> {
context.clone(),
"s3-temp-id".to_string(),
"default-s3".to_string(),
cloud_provider.access_key.clone(),
cloud_provider.secret_key.clone(),
cloud_provider.access_key_id().clone(),
cloud_provider.secret_access_key().clone(),
zone,
BucketDeleteStrategy::Empty,
false,
@@ -174,7 +176,7 @@ impl<'a> Kapsule<'a> {
nodes_groups,
template_directory,
options,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
listeners: cloud_provider.listeners().clone(), // copy listeners from CloudProvider
})
}
@@ -492,15 +494,15 @@ impl<'a> Kapsule<'a> {
let charts_prerequisites = ChartsConfigPrerequisites::new(
self.cloud_provider.organization_id().to_string(),
self.cloud_provider.organization_long_id,
self.cloud_provider.organization_long_id(),
self.id().to_string(),
self.long_id,
self.zone,
self.cluster_name(),
"scw".to_string(),
self.context.is_test_cluster(),
self.cloud_provider.access_key.to_string(),
self.cloud_provider.secret_key.to_string(),
self.cloud_provider.access_key_id().to_string(),
self.cloud_provider.secret_access_key().to_string(),
self.options.scaleway_project_id.to_string(),
self.options.qovery_engine_location.clone(),
self.context.is_feature_enabled(&Features::LogsHistory),
@@ -562,39 +564,6 @@ impl<'a> Kapsule<'a> {
))
}
fn upgrade(&self) -> Result<(), EngineError> {
let kubeconfig = match self.config_file() {
Ok(f) => f.0,
Err(e) => return Err(e),
};
match is_kubernetes_upgrade_required(
kubeconfig,
&self.version,
self.cloud_provider.credentials_environment_variables(),
) {
Ok(x) => self.upgrade_with_status(x),
Err(e) => {
let msg = format!(
"Error detected, upgrade won't occurs, but standard deployment. {:?}",
e.message
);
error!("{}", &msg);
Err(EngineError {
cause: EngineErrorCause::Internal,
scope: EngineErrorScope::Engine,
execution_id: self.context.execution_id().to_string(),
message: Some(msg),
})
}
}
}
fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
// TODO(benjaminch): to be implemented
Ok(())
}
fn upgrade_error(&self) -> Result<(), EngineError> {
Ok(())
}
@@ -1000,6 +969,86 @@ impl<'a> Kubernetes for Kapsule<'a> {
send_progress_on_long_task(self, Action::Create, || self.create_error())
}
/// Upgrades the Kapsule cluster nodes to `kubernetes_upgrade_status.requested_version`
/// by re-rendering the terraform templates with the new version and applying them.
///
/// Progress messages are pushed to listeners via `listeners_helper`; any
/// templating or terraform failure is converted into an `EngineError`.
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> {
let listeners_helper = ListenersHelper::new(&self.listeners);
self.send_to_customer(
format!(
"Start preparing Kapsule upgrade process {} cluster with id {}",
self.name(),
self.id()
)
.as_str(),
&listeners_helper,
);
// workspace directory where rendered terraform files will be written
let temp_dir = match self.get_temp_dir() {
Ok(dir) => dir,
Err(e) => return Err(e),
};
// generate terraform files and copy them into temp dir
let mut context = self.tera_context()?;
//
// Upgrade nodes
//
let message = format!("Start upgrading process for nodes on {}/{}", self.name(), self.id());
info!("{}", &message);
self.send_to_customer(&message, &listeners_helper);
// inject the requested version so the rendered terraform targets it
context.insert(
"kubernetes_cluster_version",
format!("{}", &kubernetes_upgrade_status.requested_version).as_str(),
);
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
self.template_directory.as_str(),
temp_dir.as_str(),
&context,
),
)?;
// helm charts are copied verbatim (no templating applied)
let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::copy_non_template_files(
format!("{}/common/bootstrap/charts", self.context.lib_root_dir()),
common_charts_temp_dir.as_str(),
),
)?;
self.send_to_customer(
format!("Upgrading Kubernetes {} nodes", self.name()).as_str(),
&listeners_helper,
);
// terraform init/validate/plan/apply performs the actual node upgrade
// (plan only when dry-run is enabled)
match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()),
) {
Ok(_) => {
let message = format!("Kubernetes {} nodes have been successfully upgraded", self.name());
info!("{}", &message);
self.send_to_customer(&message, &listeners_helper);
}
Err(e) => {
error!(
"Error while upgrading nodes for cluster {} with id {}.",
self.name(),
self.id()
);
return Err(e);
}
}
Ok(())
}
#[named]
fn on_upgrade(&self) -> Result<(), EngineError> {
print_action(

View File

@@ -1,4 +1,5 @@
use std::any::Any;
use uuid::Uuid;
use crate::cloud_provider::{CloudProvider, EngineError, Kind, TerraformStateCredentials};
use crate::constants::{SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY};
@@ -66,10 +67,26 @@ impl CloudProvider for Scaleway {
self.organization_id.as_str()
}
/// Stable organization UUID attached to this Scaleway account.
fn organization_long_id(&self) -> Uuid {
self.organization_long_id
}
/// Human-readable provider name.
fn name(&self) -> &str {
self.name.as_str()
}
/// Scaleway access key, returned as an owned String.
fn access_key_id(&self) -> String {
self.access_key.to_string()
}
/// Scaleway secret key, returned as an owned String.
fn secret_access_key(&self) -> String {
self.secret_key.to_string()
}
/// Not implemented for Scaleway.
/// WARNING: panics via `todo!()` if called — callers must not rely on it yet.
fn token(&self) -> &str {
todo!()
}
fn is_valid(&self) -> Result<(), EngineError> {
// TODO(benjaminch): To be implemented
Ok(())

View File

@@ -347,7 +347,7 @@ impl<'a> Transaction<'a> {
}
}
pub fn commit(&mut self) -> TransactionResult {
pub fn commit(mut self) -> TransactionResult {
let mut applications_by_environment: HashMap<&Environment, Vec<Box<dyn Application>>> = HashMap::new();
for step in self.steps.iter() {

View File

@@ -27,6 +27,7 @@ time = "0.2.23"
hashicorp_vault = "2.0.1"
maplit = "1.0.2"
uuid = { version = "0.8", features = ["v4"] }
const_format = "0.2.22"
# Digital Ocean Deps
digitalocean = "0.1.1"

View File

@@ -1,25 +1,28 @@
extern crate serde;
extern crate serde_derive;
use tracing::error;
use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode, EKS};
use const_format::formatcp;
use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use qovery_engine::cloud_provider::aws::AWS;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::docker_hub::DockerHub;
use qovery_engine::container_registry::ecr::ECR;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::engine::Engine;
use qovery_engine::models::{Context, EnvironmentAction};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use qovery_engine::models::Context;
use tracing::error;
use crate::cloudflare::dns_provider_cloudflare;
use crate::common::Cluster;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
pub const AWS_QOVERY_ORGANIZATION_ID: &str = "u8nb94c7fwxzr2jt";
pub const AWS_REGION_FOR_S3: &str = "eu-west-3";
pub const AWS_KUBERNETES_VERSION: &str = "1.18";
pub const AWS_TEST_REGION: &str = "us-east-2";
pub const AWS_KUBERNETES_MAJOR_VERSION: u8 = 1;
pub const AWS_KUBERNETES_MINOR_VERSION: u8 = 18;
pub const AWS_KUBERNETES_VERSION: &'static str =
formatcp!("{}.{}", AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION);
pub const AWS_KUBE_TEST_CLUSTER_ID: &str = "dmubm9agk7sr8a8r";
pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t2.micro";
pub const AWS_DATABASE_DISK_TYPE: &str = "gp2";
@@ -54,202 +57,131 @@ pub fn container_registry_docker_hub(context: &Context) -> DockerHub {
)
}
/// Default EKS node group used by the functional tests:
/// one autoscaling group of 5..=10 `t3a.large` instances.
pub fn aws_kubernetes_nodes() -> Vec<NodeGroups> {
    let default_group = NodeGroups::new("groupeks0".to_string(), 5, 10, "t3a.large".to_string())
        .expect("Problem while setup EKS nodes");
    vec![default_group]
}
impl Cluster<AWS, Options> for AWS {
fn docker_cr_engine(context: &Context) -> Engine {
// use ECR
let container_registry = Box::new(container_registry_ecr(context));
/// Builds the AWS `CloudProvider` used by functional tests; credentials come
/// from the vault-backed `FuncTestsSecrets`.
///
/// Panics (`unwrap`) if any required AWS secret is missing.
///
/// NOTE(review): both the "u8nb94c7fwxzr2jt" literal and
/// `AWS_QOVERY_ORGANIZATION_ID` (same value) are passed — presumably cluster
/// id vs organization id; confirm against `AWS::new`'s parameter list.
pub fn cloud_provider_aws(context: &Context) -> AWS {
let secrets = FuncTestsSecrets::new();
AWS::new(
context.clone(),
"u8nb94c7fwxzr2jt",
AWS_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
"QoveryTest",
secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(),
secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(),
TerraformStateCredentials {
access_key_id: secrets.TERRAFORM_AWS_ACCESS_KEY_ID.unwrap(),
secret_access_key: secrets.TERRAFORM_AWS_SECRET_ACCESS_KEY.unwrap(),
region: "eu-west-3".to_string(),
},
)
}
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
pub fn eks_options(secrets: FuncTestsSecrets) -> Options {
Options {
eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()],
eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()],
eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()],
rds_zone_a_subnet_blocks: vec![
"10.0.214.0/23".to_string(),
"10.0.216.0/23".to_string(),
"10.0.218.0/23".to_string(),
"10.0.220.0/23".to_string(),
"10.0.222.0/23".to_string(),
"10.0.224.0/23".to_string(),
],
rds_zone_b_subnet_blocks: vec![
"10.0.226.0/23".to_string(),
"10.0.228.0/23".to_string(),
"10.0.230.0/23".to_string(),
"10.0.232.0/23".to_string(),
"10.0.234.0/23".to_string(),
"10.0.236.0/23".to_string(),
],
rds_zone_c_subnet_blocks: vec![
"10.0.238.0/23".to_string(),
"10.0.240.0/23".to_string(),
"10.0.242.0/23".to_string(),
"10.0.244.0/23".to_string(),
"10.0.246.0/23".to_string(),
"10.0.248.0/23".to_string(),
],
documentdb_zone_a_subnet_blocks: vec![
"10.0.196.0/23".to_string(),
"10.0.198.0/23".to_string(),
"10.0.200.0/23".to_string(),
],
documentdb_zone_b_subnet_blocks: vec![
"10.0.202.0/23".to_string(),
"10.0.204.0/23".to_string(),
"10.0.206.0/23".to_string(),
],
documentdb_zone_c_subnet_blocks: vec![
"10.0.208.0/23".to_string(),
"10.0.210.0/23".to_string(),
"10.0.212.0/23".to_string(),
],
elasticache_zone_a_subnet_blocks: vec!["10.0.172.0/23".to_string(), "10.0.174.0/23".to_string()],
elasticache_zone_b_subnet_blocks: vec!["10.0.176.0/23".to_string(), "10.0.178.0/23".to_string()],
elasticache_zone_c_subnet_blocks: vec!["10.0.180.0/23".to_string(), "10.0.182.0/23".to_string()],
elasticsearch_zone_a_subnet_blocks: vec!["10.0.184.0/23".to_string(), "10.0.186.0/23".to_string()],
elasticsearch_zone_b_subnet_blocks: vec!["10.0.188.0/23".to_string(), "10.0.190.0/23".to_string()],
elasticsearch_zone_c_subnet_blocks: vec!["10.0.192.0/23".to_string(), "10.0.194.0/23".to_string()],
vpc_qovery_network_mode: VpcQoveryNetworkMode::WithoutNatGateways,
vpc_cidr_block: "10.0.0.0/16".to_string(),
eks_cidr_subnet: "20".to_string(),
eks_access_cidr_blocks: secrets
.EKS_ACCESS_CIDR_BLOCKS
.unwrap()
.replace("\"", "")
.replace("[", "")
.replace("]", "")
.split(",")
.map(|c| c.to_string())
.collect(),
rds_cidr_subnet: "23".to_string(),
documentdb_cidr_subnet: "23".to_string(),
elasticache_cidr_subnet: "23".to_string(),
elasticsearch_cidr_subnet: "23".to_string(),
qovery_api_url: secrets.QOVERY_API_URL.unwrap(),
qovery_engine_location: ClientSide,
engine_version_controller_token: secrets.QOVERY_ENGINE_CONTROLLER_TOKEN.unwrap(),
agent_version_controller_token: secrets.QOVERY_AGENT_CONTROLLER_TOKEN.unwrap(),
grafana_admin_user: "admin".to_string(),
grafana_admin_password: "qovery".to_string(),
discord_api_key: secrets.DISCORD_API_URL.unwrap(),
qovery_nats_url: secrets.QOVERY_NATS_URL.unwrap(),
qovery_ssh_key: secrets.QOVERY_SSH_USER.unwrap(),
qovery_nats_user: secrets.QOVERY_NATS_USERNAME.unwrap(),
qovery_nats_password: secrets.QOVERY_NATS_PASSWORD.unwrap(),
tls_email_report: secrets.LETS_ENCRYPT_EMAIL_REPORT.unwrap(),
qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(),
qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(),
// use AWS
let cloud_provider = AWS::cloud_provider(context);
let dns_provider = Box::new(dns_provider_cloudflare(context));
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
/// Boxed AWS provider for tests; credentials come from `FuncTestsSecrets`.
///
/// Panics (`unwrap`) if any required AWS secret is missing.
///
/// NOTE(review): both the "u8nb94c7fwxzr2jt" literal and
/// `AWS_QOVERY_ORGANIZATION_ID` (same value) are passed — presumably cluster
/// id vs organization id; confirm against `AWS::new`'s parameter list.
fn cloud_provider(context: &Context) -> Box<AWS> {
let secrets = FuncTestsSecrets::new();
Box::new(AWS::new(
context.clone(),
"u8nb94c7fwxzr2jt",
AWS_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
"QoveryTest",
secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(),
secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(),
TerraformStateCredentials {
access_key_id: secrets.TERRAFORM_AWS_ACCESS_KEY_ID.unwrap(),
secret_access_key: secrets.TERRAFORM_AWS_SECRET_ACCESS_KEY.unwrap(),
region: "eu-west-3".to_string(),
},
))
}
/// Default EKS node group: autoscaling 5..=10 `t3a.large` instances.
fn kubernetes_nodes() -> Vec<NodeGroups> {
vec![NodeGroups::new("groupeks0".to_string(), 5, 10, "t3a.large".to_string())
.expect("Problem while setup EKS nodes")]
}
/// Builds the full EKS `Options` for a test cluster: subnet CIDR layout for
/// every managed service, network mode, and all Qovery control-plane
/// tokens/URLs pulled from `FuncTestsSecrets`.
///
/// Panics (`unwrap`) if any required secret is missing.
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option<String>) -> Options {
Options {
eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()],
eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()],
eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()],
rds_zone_a_subnet_blocks: vec![
"10.0.214.0/23".to_string(),
"10.0.216.0/23".to_string(),
"10.0.218.0/23".to_string(),
"10.0.220.0/23".to_string(),
"10.0.222.0/23".to_string(),
"10.0.224.0/23".to_string(),
],
rds_zone_b_subnet_blocks: vec![
"10.0.226.0/23".to_string(),
"10.0.228.0/23".to_string(),
"10.0.230.0/23".to_string(),
"10.0.232.0/23".to_string(),
"10.0.234.0/23".to_string(),
"10.0.236.0/23".to_string(),
],
rds_zone_c_subnet_blocks: vec![
"10.0.238.0/23".to_string(),
"10.0.240.0/23".to_string(),
"10.0.242.0/23".to_string(),
"10.0.244.0/23".to_string(),
"10.0.246.0/23".to_string(),
"10.0.248.0/23".to_string(),
],
documentdb_zone_a_subnet_blocks: vec![
"10.0.196.0/23".to_string(),
"10.0.198.0/23".to_string(),
"10.0.200.0/23".to_string(),
],
documentdb_zone_b_subnet_blocks: vec![
"10.0.202.0/23".to_string(),
"10.0.204.0/23".to_string(),
"10.0.206.0/23".to_string(),
],
documentdb_zone_c_subnet_blocks: vec![
"10.0.208.0/23".to_string(),
"10.0.210.0/23".to_string(),
"10.0.212.0/23".to_string(),
],
elasticache_zone_a_subnet_blocks: vec!["10.0.172.0/23".to_string(), "10.0.174.0/23".to_string()],
elasticache_zone_b_subnet_blocks: vec!["10.0.176.0/23".to_string(), "10.0.178.0/23".to_string()],
elasticache_zone_c_subnet_blocks: vec!["10.0.180.0/23".to_string(), "10.0.182.0/23".to_string()],
elasticsearch_zone_a_subnet_blocks: vec!["10.0.184.0/23".to_string(), "10.0.186.0/23".to_string()],
elasticsearch_zone_b_subnet_blocks: vec!["10.0.188.0/23".to_string(), "10.0.190.0/23".to_string()],
elasticsearch_zone_c_subnet_blocks: vec!["10.0.192.0/23".to_string(), "10.0.194.0/23".to_string()],
vpc_qovery_network_mode: VpcQoveryNetworkMode::WithoutNatGateways,
vpc_cidr_block: "10.0.0.0/16".to_string(),
eks_cidr_subnet: "20".to_string(),
// secret is stored as a JSON-ish list (e.g. ["a.b.c.d/32", ...]);
// strip quotes/brackets then split on commas
eks_access_cidr_blocks: secrets
.EKS_ACCESS_CIDR_BLOCKS
.unwrap()
.replace("\"", "")
.replace("[", "")
.replace("]", "")
.split(",")
.map(|c| c.to_string())
.collect(),
rds_cidr_subnet: "23".to_string(),
documentdb_cidr_subnet: "23".to_string(),
elasticache_cidr_subnet: "23".to_string(),
elasticsearch_cidr_subnet: "23".to_string(),
qovery_api_url: secrets.QOVERY_API_URL.unwrap(),
qovery_engine_location: ClientSide,
engine_version_controller_token: secrets.QOVERY_ENGINE_CONTROLLER_TOKEN.unwrap(),
agent_version_controller_token: secrets.QOVERY_AGENT_CONTROLLER_TOKEN.unwrap(),
grafana_admin_user: "admin".to_string(),
grafana_admin_password: "qovery".to_string(),
discord_api_key: secrets.DISCORD_API_URL.unwrap(),
qovery_nats_url: secrets.QOVERY_NATS_URL.unwrap(),
qovery_ssh_key: secrets.QOVERY_SSH_USER.unwrap(),
qovery_nats_user: secrets.QOVERY_NATS_USERNAME.unwrap(),
qovery_nats_password: secrets.QOVERY_NATS_PASSWORD.unwrap(),
tls_email_report: secrets.LETS_ENCRYPT_EMAIL_REPORT.unwrap(),
qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(),
qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(),
}
}
}
/// Builds the shared test EKS cluster (fixed id/name `AWS_KUBE_TEST_CLUSTER_ID`,
/// version `AWS_KUBERNETES_VERSION`, region from secrets) over the given
/// provider, DNS provider and node groups.
///
/// Panics (`unwrap`) if `AWS_DEFAULT_REGION` is missing or EKS construction fails.
pub fn aws_kubernetes_eks<'a>(
context: &Context,
cloud_provider: &'a AWS,
dns_provider: &'a dyn DnsProvider,
nodes_groups: Vec<NodeGroups>,
) -> EKS<'a> {
let secrets = FuncTestsSecrets::new();
EKS::<'a>::new(
context.clone(),
AWS_KUBE_TEST_CLUSTER_ID,
uuid::Uuid::new_v4(),
AWS_KUBE_TEST_CLUSTER_ID,
AWS_KUBERNETES_VERSION,
secrets.clone().AWS_DEFAULT_REGION.unwrap().as_str(),
cloud_provider,
dns_provider,
eks_options(secrets),
nodes_groups,
)
.unwrap()
}
/// Assembles a full test `Engine` for AWS: local Docker build platform,
/// ECR container registry, AWS cloud provider and Cloudflare DNS.
pub fn docker_ecr_aws_engine(context: &Context) -> Engine {
// use ECR
let container_registry = Box::new(container_registry_ecr(context));
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
// use AWS
let cloud_provider = Box::new(cloud_provider_aws(context));
let dns_provider = Box::new(dns_provider_cloudflare(context));
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
/// Deploys `environment_action` on the shared AWS test EKS cluster (forcing
/// image build & push) and returns the committed transaction result.
///
/// Panics (`unwrap`) if the engine session cannot be opened.
pub fn deploy_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = docker_ecr_aws_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_aws(context);
let nodes = aws_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let eks = aws_kubernetes_eks(context, &cp, &dns_provider, nodes);
// failures surface through the committed transaction result, not here
let _ = tx.deploy_environment_with_options(
&eks,
&environment_action,
DeploymentOption {
force_build: true,
force_push: true,
},
);
tx.commit()
}
/// Deletes `environment_action` from the shared AWS test EKS cluster and
/// returns the committed transaction result.
///
/// Panics (`unwrap`) if the engine session cannot be opened.
pub fn delete_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = docker_ecr_aws_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_aws(context);
let nodes = aws_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let eks = aws_kubernetes_eks(context, &cp, &dns_provider, nodes);
// failures surface through the committed transaction result, not here
let _ = tx.delete_environment(&eks, &environment_action);
tx.commit()
}
/// Pauses `environment_action` on the shared AWS test EKS cluster and
/// returns the committed transaction result.
///
/// Panics (`unwrap`) if the engine session cannot be opened.
pub fn pause_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = docker_ecr_aws_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_aws(context);
let nodes = aws_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let eks = aws_kubernetes_eks(context, &cp, &dns_provider, nodes);
// failures surface through the committed transaction result, not here
let _ = tx.pause_environment(&eks, &environment_action);
tx.commit()
}

View File

@@ -5,22 +5,169 @@ use chrono::Utc;
use qovery_engine::cloud_provider::utilities::sanitize_name;
use qovery_engine::models::{
Action, Application, Context, Database, DatabaseKind, Environment, GitCredentials, Route, Router, Storage,
StorageType,
Action, Application, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction,
GitCredentials, Route, Router, Storage, StorageType,
};
use crate::utilities::{generate_id, generate_password, get_svc_name};
use crate::aws::{AWS_KUBERNETES_VERSION, AWS_KUBE_TEST_CLUSTER_ID};
use crate::cloudflare::dns_provider_cloudflare;
use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_KUBE_TEST_CLUSTER_ID, DO_KUBE_TEST_CLUSTER_NAME};
use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_KUBE_TEST_CLUSTER_ID, SCW_KUBE_TEST_CLUSTER_NAME};
use crate::utilities::{
context, db_disk_type, db_infos, db_instance_type, generate_cluster_id, generate_id, generate_password, get_pvc,
get_svc, get_svc_name, init, FuncTestsSecrets,
};
use base64;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS};
use qovery_engine::cloud_provider::aws::AWS;
use qovery_engine::cloud_provider::digitalocean::application::Region;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::cloud_provider::digitalocean::DO;
use qovery_engine::cloud_provider::kubernetes::Kubernetes;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::scaleway::application::Zone;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::cloud_provider::{CloudProvider, Kind};
use qovery_engine::cmd::structs::SVCItem;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::engine::Engine;
use qovery_engine::models::DatabaseMode::CONTAINER;
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use std::collections::BTreeMap;
use std::str::FromStr;
use tracing::{error, info, span, warn, Level};
pub fn execution_id() -> String {
Utc::now()
.to_rfc3339()
.replace(":", "-")
.replace(".", "-")
.replace("+", "-")
/// Per-cloud-provider factory used by functional tests.
/// `T` is the concrete `CloudProvider` type, `U` its options type.
pub trait Cluster<T, U> {
/// Full engine: build platform + container registry + cloud & DNS providers.
fn docker_cr_engine(context: &Context) -> Engine;
/// Boxed concrete cloud provider instance.
fn cloud_provider(context: &Context) -> Box<T>;
/// Default node groups for a test cluster.
fn kubernetes_nodes() -> Vec<NodeGroups>;
/// Provider-specific cluster options built from test secrets.
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option<String>) -> U;
}
/// Runs an environment action (deploy / pause / delete) against a test
/// cluster of the given provider kind; each method commits its transaction
/// and returns the result.
pub trait Infrastructure {
fn deploy_environment(
&self,
provider_kind: Kind,
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult;
fn pause_environment(
&self,
provider_kind: Kind,
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult;
fn delete_environment(
&self,
provider_kind: Kind,
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult;
}
// Runs environment actions against a freshly built engine + test Kubernetes
// cluster for the requested cloud provider. Each method builds the whole
// stack (engine, session, cloud provider, DNS, kubernetes), applies the
// action and commits the transaction. Panics (`unwrap`) if the engine
// session cannot be opened.
impl Infrastructure for Environment {
    fn deploy_environment(
        &self,
        provider_kind: Kind,
        context: &Context,
        environment_action: &EnvironmentAction,
    ) -> TransactionResult {
        let engine: Engine = match provider_kind {
            Kind::Aws => AWS::docker_cr_engine(context),
            Kind::Do => DO::docker_cr_engine(context),
            Kind::Scw => Scaleway::docker_cr_engine(context),
        };
        let session = engine.session().unwrap();
        let mut tx = session.transaction();
        let dns_provider = dns_provider_cloudflare(context);
        // direct binding instead of deferred `let cp; cp = ...` initialization
        let cp: Box<dyn CloudProvider> = match provider_kind {
            Kind::Aws => AWS::cloud_provider(context),
            Kind::Do => DO::cloud_provider(context),
            Kind::Scw => Scaleway::cloud_provider(context),
        };
        let k = get_environment_test_kubernetes(provider_kind, context, cp.as_ref(), &dns_provider);
        // force build & push so the test always exercises the full pipeline;
        // failures surface through the committed transaction result
        let _ = tx.deploy_environment_with_options(
            k.as_ref(),
            environment_action,
            DeploymentOption {
                force_build: true,
                force_push: true,
            },
        );
        tx.commit()
    }
    fn pause_environment(
        &self,
        provider_kind: Kind,
        context: &Context,
        environment_action: &EnvironmentAction,
    ) -> TransactionResult {
        let engine: Engine = match provider_kind {
            Kind::Aws => AWS::docker_cr_engine(context),
            Kind::Do => DO::docker_cr_engine(context),
            Kind::Scw => Scaleway::docker_cr_engine(context),
        };
        let session = engine.session().unwrap();
        let mut tx = session.transaction();
        let dns_provider = dns_provider_cloudflare(context);
        let cp: Box<dyn CloudProvider> = match provider_kind {
            Kind::Aws => AWS::cloud_provider(context),
            Kind::Do => DO::cloud_provider(context),
            Kind::Scw => Scaleway::cloud_provider(context),
        };
        let k = get_environment_test_kubernetes(provider_kind, context, cp.as_ref(), &dns_provider);
        // failures surface through the committed transaction result
        let _ = tx.pause_environment(k.as_ref(), environment_action);
        tx.commit()
    }
    fn delete_environment(
        &self,
        provider_kind: Kind,
        context: &Context,
        environment_action: &EnvironmentAction,
    ) -> TransactionResult {
        let engine: Engine = match provider_kind {
            Kind::Aws => AWS::docker_cr_engine(context),
            Kind::Do => DO::docker_cr_engine(context),
            Kind::Scw => Scaleway::docker_cr_engine(context),
        };
        let session = engine.session().unwrap();
        let mut tx = session.transaction();
        let dns_provider = dns_provider_cloudflare(context);
        let cp: Box<dyn CloudProvider> = match provider_kind {
            Kind::Aws => AWS::cloud_provider(context),
            Kind::Do => DO::cloud_provider(context),
            Kind::Scw => Scaleway::cloud_provider(context),
        };
        let k = get_environment_test_kubernetes(provider_kind, context, cp.as_ref(), &dns_provider);
        // failures surface through the committed transaction result
        let _ = tx.delete_environment(k.as_ref(), environment_action);
        tx.commit()
    }
}
/// Shape of a cluster lifecycle test run.
pub enum ClusterTestType {
// plain create/delete run
Classic,
// includes a pause step — see the test driver for the exact sequence
WithPause,
// includes a version-upgrade step — see the test driver for the exact sequence
WithUpgrade,
}
pub fn environment_3_apps_3_routers_3_databases(
@@ -660,3 +807,466 @@ pub fn environment_only_http_server_router(context: &Context, organization_id: &
clone_from_environment_id: None,
}
}
pub fn test_db(
context: Context,
mut environment: Environment,
secrets: FuncTestsSecrets,
version: &str,
test_name: &str,
db_kind: DatabaseKind,
provider_kind: Kind,
database_mode: DatabaseMode,
is_public: bool,
) -> String {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context_for_delete = context.clone_not_same_execution_id();
let app_id = generate_id();
let database_username = "superuser".to_string();
let database_password = generate_id();
let db_kind_str = db_kind.name().to_string();
let database_host = format!(
"{}-{}.{}",
db_kind_str.clone(),
generate_id(),
secrets.clone().DEFAULT_TEST_DOMAIN.unwrap()
);
let dyn_db_fqdn = match is_public.clone() {
true => database_host.clone(),
false => match database_mode.clone() {
DatabaseMode::MANAGED => format!("{}-dns", app_id.clone()),
DatabaseMode::CONTAINER => get_svc_name(db_kind.clone(), provider_kind.clone()).to_string(),
},
};
let db_infos = db_infos(
db_kind.clone(),
database_mode.clone(),
database_username.clone(),
database_password.clone(),
dyn_db_fqdn.clone(),
);
let database_port = db_infos.db_port.clone();
let database_db_name = db_infos.db_name.clone();
let storage_size = 10;
let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone());
let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone());
let db = Database {
kind: db_kind.clone(),
action: Action::Create,
id: app_id.clone(),
name: database_db_name.clone(),
version: version.to_string(),
fqdn_id: format!("{}-{}", db_kind_str.clone(), generate_id()),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "100m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: storage_size.clone(),
database_instance_type: db_instance_type.to_string(),
database_disk_type: db_disk_type.to_string(),
activate_high_availability: false,
activate_backups: false,
publicly_accessible: is_public.clone(),
mode: database_mode.clone(),
};
environment.databases = vec![db.clone()];
let app_name = format!("{}-app-{}", db_kind_str.clone(), generate_id());
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = app_name.clone();
app.commit_id = db_infos.app_commit.clone();
app.private_port = Some(1234);
app.dockerfile_path = Some(format!("Dockerfile-{}", version));
app.environment_vars = db_infos.app_env_vars.clone();
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = app_name.clone();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match environment.deploy_environment(provider_kind.clone(), &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
}
let kube_cluster_id = match provider_kind {
Kind::Aws => AWS_KUBE_TEST_CLUSTER_ID,
Kind::Do => DO_KUBE_TEST_CLUSTER_ID,
Kind::Scw => SCW_KUBE_TEST_CLUSTER_ID,
};
match database_mode.clone() {
DatabaseMode::CONTAINER => {
match get_pvc(
provider_kind.clone(),
kube_cluster_id.clone(),
environment.clone(),
secrets.clone(),
) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
match get_svc(
provider_kind.clone(),
kube_cluster_id.clone(),
environment.clone(),
secrets.clone(),
) {
Ok(svc) => assert_eq!(
svc.items
.expect("No items in svc")
.into_iter()
.filter(|svc| svc
.metadata
.name
.contains(get_svc_name(db_kind.clone(), provider_kind.clone()))
&& &svc.spec.svc_type == "LoadBalancer")
.collect::<Vec<SVCItem>>()
.len(),
match is_public {
true => 1,
false => 0,
}
),
Err(_) => assert!(false),
};
}
DatabaseMode::MANAGED => {
match get_svc(
provider_kind.clone(),
kube_cluster_id.clone(),
environment.clone(),
secrets.clone(),
) {
Ok(svc) => {
let service = svc
.items
.expect("No items in svc")
.into_iter()
.filter(|svc| {
svc.metadata.name.contains(format!("{}-dns", app_id.clone()).as_str())
&& svc.spec.svc_type == "ExternalName"
})
.collect::<Vec<SVCItem>>();
let annotations = &service[0].metadata.annotations;
assert_eq!(service.len(), 1);
match is_public {
true => {
assert!(annotations.contains_key("external-dns.alpha.kubernetes.io/hostname"));
assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_host);
}
false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")),
}
}
Err(_) => assert!(false),
};
}
}
match environment_delete.delete_environment(provider_kind.clone(), &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
}
return test_name.to_string();
}
/// Builds a boxed `Kubernetes` implementation pointing at the provider's
/// shared environment-test cluster (fixed cluster id/name, pinned version).
///
/// The returned object borrows `cloud_provider` and `dns_provider`, hence the
/// explicit `'a` lifetime on the trait object. Panics if the provider's
/// region/zone secret is missing or unparsable.
pub fn get_environment_test_kubernetes<'a>(
    provider_kind: Kind,
    context: &Context,
    cloud_provider: &'a dyn CloudProvider,
    dns_provider: &'a dyn DnsProvider,
) -> Box<dyn Kubernetes + 'a> {
    let secrets = FuncTestsSecrets::new();

    // Each arm coerces its concrete Box<…> to Box<dyn Kubernetes + 'a>.
    match provider_kind {
        Kind::Aws => Box::new(
            EKS::new(
                context.clone(),
                AWS_KUBE_TEST_CLUSTER_ID,
                uuid::Uuid::new_v4(),
                AWS_KUBE_TEST_CLUSTER_ID,
                AWS_KUBERNETES_VERSION,
                // clone only the field we need instead of the whole secrets struct
                secrets.AWS_DEFAULT_REGION.clone().unwrap().as_str(),
                cloud_provider,
                dns_provider,
                AWS::kubernetes_cluster_options(secrets, None),
                AWS::kubernetes_nodes(),
            )
            .unwrap(),
        ),
        Kind::Do => Box::new(
            DOKS::new(
                context.clone(),
                DO_KUBE_TEST_CLUSTER_ID.to_string(),
                uuid::Uuid::new_v4(),
                DO_KUBE_TEST_CLUSTER_NAME.to_string(),
                DO_KUBERNETES_VERSION.to_string(),
                Region::from_str(secrets.DIGITAL_OCEAN_DEFAULT_REGION.clone().unwrap().as_str()).unwrap(),
                cloud_provider,
                dns_provider,
                DO::kubernetes_nodes(),
                DO::kubernetes_cluster_options(secrets, Option::from(DO_KUBE_TEST_CLUSTER_ID.to_string())),
            )
            .unwrap(),
        ),
        Kind::Scw => Box::new(
            Kapsule::new(
                context.clone(),
                SCW_KUBE_TEST_CLUSTER_ID.to_string(),
                uuid::Uuid::new_v4(),
                SCW_KUBE_TEST_CLUSTER_NAME.to_string(),
                SCW_KUBERNETES_VERSION.to_string(),
                Zone::from_str(secrets.SCALEWAY_DEFAULT_REGION.clone().unwrap().as_str()).unwrap(),
                cloud_provider,
                dns_provider,
                Scaleway::kubernetes_nodes(),
                Scaleway::kubernetes_cluster_options(secrets, None),
            )
            .unwrap(),
        ),
    }
}
/// Builds a boxed `Kubernetes` implementation for ad-hoc cluster tests, with a
/// caller-chosen cluster id/name, boot version and localisation (region/zone
/// string, parsed per provider).
///
/// `vpc_network_mode` is only consumed on AWS and must be `Some` there; it is
/// ignored for DO and Scaleway. Panics with an explicit message when it is
/// missing for AWS or when `localisation` cannot be parsed.
pub fn get_cluster_test_kubernetes<'a>(
    provider_kind: Kind,
    secrets: FuncTestsSecrets,
    context: &Context,
    cluster_id: String,
    cluster_name: String,
    boot_version: String,
    localisation: &str,
    cloud_provider: &'a dyn CloudProvider,
    dns_provider: &'a dyn DnsProvider,
    vpc_network_mode: Option<VpcQoveryNetworkMode>,
) -> Box<dyn Kubernetes + 'a> {
    match provider_kind {
        Kind::Aws => {
            // AWS is the only provider with a configurable VPC network mode.
            let mut options = AWS::kubernetes_cluster_options(secrets, None);
            options.vpc_qovery_network_mode =
                vpc_network_mode.expect("vpc_network_mode must be set for AWS clusters");
            Box::new(
                EKS::new(
                    context.clone(),
                    cluster_id.as_str(),
                    uuid::Uuid::new_v4(),
                    cluster_name.as_str(),
                    boot_version.as_str(),
                    localisation,
                    cloud_provider,
                    dns_provider,
                    options,
                    AWS::kubernetes_nodes(),
                )
                .unwrap(),
            )
        }
        Kind::Do => Box::new(
            DOKS::new(
                context.clone(),
                cluster_id,
                uuid::Uuid::new_v4(),
                // cluster_name is also consumed below as the DOKS options' name
                cluster_name.clone(),
                boot_version,
                Region::from_str(localisation).expect("Unknown region set for DOKS"),
                cloud_provider,
                dns_provider,
                DO::kubernetes_nodes(),
                DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)),
            )
            .unwrap(),
        ),
        Kind::Scw => Box::new(
            Kapsule::new(
                context.clone(),
                cluster_id,
                uuid::Uuid::new_v4(),
                cluster_name,
                boot_version,
                Zone::from_str(localisation).expect("Unknown zone set for Kapsule"),
                cloud_provider,
                dns_provider,
                Scaleway::kubernetes_nodes(),
                Scaleway::kubernetes_cluster_options(secrets, None),
            )
            .unwrap(),
        ),
    }
}
pub fn cluster_test(
test_name: &str,
provider_kind: Kind,
localisation: &str,
secrets: FuncTestsSecrets,
test_type: ClusterTestType,
major_boot_version: u8,
minor_boot_version: u8,
vpc_network_mode: Option<VpcQoveryNetworkMode>,
) -> String {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let cluster_id = generate_cluster_id(localisation.clone());
let cluster_name = generate_cluster_id(localisation.clone());
let boot_version = format!("{}.{}", major_boot_version, minor_boot_version.clone());
let engine;
match provider_kind {
Kind::Aws => engine = AWS::docker_cr_engine(&context),
Kind::Do => engine = DO::docker_cr_engine(&context),
Kind::Scw => engine = Scaleway::docker_cr_engine(&context),
};
let dns_provider = dns_provider_cloudflare(&context);
let mut deploy_tx = engine.session().unwrap().transaction();
let mut delete_tx = engine.session().unwrap().transaction();
let cp: Box<dyn CloudProvider>;
cp = match provider_kind {
Kind::Aws => AWS::cloud_provider(&context),
Kind::Do => DO::cloud_provider(&context),
Kind::Scw => Scaleway::cloud_provider(&context),
};
let kubernetes = get_cluster_test_kubernetes(
provider_kind.clone(),
secrets.clone(),
&context,
cluster_id.clone(),
cluster_name.clone(),
boot_version.clone(),
localisation.clone(),
cp.as_ref(),
&dns_provider,
vpc_network_mode.clone(),
);
// Deploy
if let Err(err) = deploy_tx.create_kubernetes(kubernetes.as_ref()) {
panic!("{:?}", err)
}
let _ = match deploy_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match test_type {
ClusterTestType::Classic => {}
ClusterTestType::WithPause => {
let mut pause_tx = engine.session().unwrap().transaction();
let mut resume_tx = engine.session().unwrap().transaction();
// Pause
if let Err(err) = pause_tx.pause_kubernetes(kubernetes.as_ref()) {
panic!("{:?}", err)
}
match pause_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Resume
if let Err(err) = resume_tx.create_kubernetes(kubernetes.as_ref()) {
panic!("{:?}", err)
}
let _ = match resume_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
}
ClusterTestType::WithUpgrade => {
let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version.clone() + 1);
let upgraded_kubernetes = get_cluster_test_kubernetes(
provider_kind.clone(),
secrets.clone(),
&context,
cluster_id.clone(),
cluster_name.clone(),
upgrade_to_version.clone(),
localisation.clone(),
cp.as_ref(),
&dns_provider,
vpc_network_mode.clone(),
);
let mut upgrade_tx = engine.session().unwrap().transaction();
let mut delete_tx = engine.session().unwrap().transaction();
// Upgrade
if let Err(err) = upgrade_tx.create_kubernetes(upgraded_kubernetes.as_ref()) {
panic!("{:?}", err)
}
let _ = match upgrade_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Delete
let _ = upgraded_kubernetes.version();
if let Err(err) = delete_tx.delete_kubernetes(upgraded_kubernetes.as_ref()) {
panic!("{:?}", err)
}
match delete_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
return test_name.to_string();
}
}
// Delete
if let Err(err) = delete_tx.delete_kubernetes(kubernetes.as_ref()) {
panic!("{:?}", err)
}
match delete_tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
}

View File

@@ -1,24 +1,26 @@
use const_format::formatcp;
use qovery_engine::build_platform::Image;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind;
use qovery_engine::cloud_provider::digitalocean::DO;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::docr::DOCR;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::engine::Engine;
use qovery_engine::error::EngineError;
use qovery_engine::models::{Context, Environment, EnvironmentAction};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use qovery_engine::models::{Context, Environment};
use crate::cloudflare::dns_provider_cloudflare;
use crate::common::Cluster;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
use qovery_engine::cloud_provider::digitalocean::application::Region;
use qovery_engine::cloud_provider::qovery::EngineLocation;
pub const DO_QOVERY_ORGANIZATION_ID: &str = "z3bc003d2";
pub const DO_KUBERNETES_VERSION: &str = "1.19";
pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1;
pub const DO_KUBERNETES_MINOR_VERSION: u8 = 19;
pub const DO_KUBERNETES_VERSION: &'static str =
formatcp!("{}.{}", DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION);
pub const DOCR_ID: &str = "registry-the-one-and-unique";
pub const DO_KUBE_TEST_CLUSTER_ID: &str = "za80c56a0";
pub const DO_KUBE_TEST_CLUSTER_NAME: &str = "qovery-za80c56a0";
@@ -38,158 +40,73 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR {
)
}
pub fn docker_cr_do_engine(context: &Context) -> Engine {
// use DigitalOcean Container Registry
let container_registry = Box::new(container_registry_digital_ocean(context));
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
// use Digital Ocean
let cloud_provider = Box::new(cloud_provider_digitalocean(context));
impl Cluster<DO, DoksOptions> for DO {
fn docker_cr_engine(context: &Context) -> Engine {
// use DigitalOcean Container Registry
let container_registry = Box::new(container_registry_digital_ocean(context));
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
// use Digital Ocean
let cloud_provider = DO::cloud_provider(context);
let dns_provider = Box::new(dns_provider_cloudflare(&context));
let dns_provider = Box::new(dns_provider_cloudflare(&context));
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
pub fn do_kubernetes_ks<'a>(
context: &Context,
cloud_provider: &'a DO,
dns_provider: &'a dyn DnsProvider,
nodes_groups: Vec<NodeGroups>,
region: Region,
) -> DOKS<'a> {
let secrets = FuncTestsSecrets::new();
DOKS::<'a>::new(
context.clone(),
DO_KUBE_TEST_CLUSTER_ID.to_string(),
uuid::Uuid::new_v4(),
DO_KUBE_TEST_CLUSTER_NAME.to_string(),
DO_KUBERNETES_VERSION.to_string(),
region,
cloud_provider,
dns_provider,
nodes_groups,
do_kubernetes_cluster_options(secrets, DO_KUBE_TEST_CLUSTER_ID.to_string()),
)
.unwrap()
}
pub fn do_kubernetes_nodes() -> Vec<NodeGroups> {
vec![
NodeGroups::new("groupdoks0".to_string(), 5, 10, "s-4vcpu-8gb".to_string())
.expect("Problem while setup DOKS nodes"),
]
}
pub fn cloud_provider_digitalocean(context: &Context) -> DO {
let secrets = FuncTestsSecrets::new();
DO::new(
context.clone(),
DO_KUBE_TEST_CLUSTER_ID,
DO_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(),
secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().as_str(),
secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().as_str(),
DO_KUBE_TEST_CLUSTER_NAME,
TerraformStateCredentials {
access_key_id: secrets.TERRAFORM_AWS_ACCESS_KEY_ID.unwrap(),
secret_access_key: secrets.TERRAFORM_AWS_SECRET_ACCESS_KEY.unwrap(),
region: secrets.TERRAFORM_AWS_REGION.unwrap(),
},
)
}
pub fn do_kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_name: String) -> DoksOptions {
DoksOptions {
vpc_cidr_block: "should-not-bet-set".to_string(), // vpc_cidr_set to autodetect will fil this empty string
vpc_cidr_set: VpcInitKind::Autodetect,
vpc_name: cluster_name,
qovery_api_url: secrets.QOVERY_API_URL.unwrap(),
qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(),
qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(),
qovery_engine_location: EngineLocation::ClientSide,
engine_version_controller_token: secrets.QOVERY_ENGINE_CONTROLLER_TOKEN.unwrap(),
agent_version_controller_token: secrets.QOVERY_AGENT_CONTROLLER_TOKEN.unwrap(),
grafana_admin_user: "admin".to_string(),
grafana_admin_password: "qovery".to_string(),
discord_api_key: secrets.DISCORD_API_URL.unwrap(),
qovery_nats_url: secrets.QOVERY_NATS_URL.unwrap(),
qovery_nats_user: secrets.QOVERY_NATS_USERNAME.unwrap(),
qovery_nats_password: secrets.QOVERY_NATS_PASSWORD.unwrap(),
qovery_ssh_key: secrets.QOVERY_SSH_USER.unwrap(),
tls_email_report: secrets.LETS_ENCRYPT_EMAIL_REPORT.unwrap(),
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
}
pub fn deploy_environment(
context: &Context,
environment_action: EnvironmentAction,
region: Region,
) -> TransactionResult {
let engine = docker_cr_do_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
/// Builds the DigitalOcean cloud-provider handle used by the tests,
/// bound to the fixed test cluster id/name and credentials read from
/// `FuncTestsSecrets`. Panics (`unwrap`) if any required secret is missing.
fn cloud_provider(context: &Context) -> Box<DO> {
let secrets = FuncTestsSecrets::new();
Box::new(DO::new(
context.clone(),
DO_KUBE_TEST_CLUSTER_ID,
DO_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(),
secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().as_str(),
secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().as_str(),
DO_KUBE_TEST_CLUSTER_NAME,
// Terraform state is stored on AWS regardless of the target provider.
TerraformStateCredentials {
access_key_id: secrets.TERRAFORM_AWS_ACCESS_KEY_ID.unwrap(),
secret_access_key: secrets.TERRAFORM_AWS_SECRET_ACCESS_KEY.unwrap(),
region: secrets.TERRAFORM_AWS_REGION.unwrap(),
},
))
}
let cp = cloud_provider_digitalocean(context);
let nodes = do_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let doks = do_kubernetes_ks(context, &cp, &dns_provider, nodes, region);
/// Node group used for DOKS test clusters: 5–10 `s-4vcpu-8gb` droplets.
fn kubernetes_nodes() -> Vec<NodeGroups> {
vec![
NodeGroups::new("groupdoks0".to_string(), 5, 10, "s-4vcpu-8gb".to_string())
.expect("Problem while setup DOKS nodes"),
]
}
let _ = tx.deploy_environment_with_options(
&doks,
&environment_action,
DeploymentOption {
force_build: true,
force_push: true,
},
);
tx.commit()
}
pub fn delete_environment(
context: &Context,
environment_action: EnvironmentAction,
region: Region,
) -> TransactionResult {
let engine = docker_cr_do_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_digitalocean(context);
let nodes = do_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(&context);
let doks = do_kubernetes_ks(context, &cp, &dns_provider, nodes, region);
let _ = tx.delete_environment(&doks, &environment_action);
tx.commit()
}
pub fn pause_environment(
context: &Context,
environment_action: EnvironmentAction,
region: Region,
) -> TransactionResult {
let engine = docker_cr_do_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_digitalocean(context);
let nodes = do_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(&context);
let doks = do_kubernetes_ks(context, &cp, &dns_provider, nodes, region);
let _ = tx.pause_environment(&doks, &environment_action);
tx.commit()
/// DOKS cluster options built from test secrets.
/// `cluster_name` is mandatory for DO (used as the VPC name) — panics if `None`.
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_name: Option<String>) -> DoksOptions {
DoksOptions {
vpc_cidr_block: "should-not-bet-set".to_string(), // vpc_cidr_set to Autodetect will fill this empty string
vpc_cidr_set: VpcInitKind::Autodetect,
vpc_name: cluster_name.unwrap(),
qovery_api_url: secrets.QOVERY_API_URL.unwrap(),
qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(),
qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(),
qovery_engine_location: EngineLocation::ClientSide,
engine_version_controller_token: secrets.QOVERY_ENGINE_CONTROLLER_TOKEN.unwrap(),
agent_version_controller_token: secrets.QOVERY_AGENT_CONTROLLER_TOKEN.unwrap(),
grafana_admin_user: "admin".to_string(),
grafana_admin_password: "qovery".to_string(),
discord_api_key: secrets.DISCORD_API_URL.unwrap(),
qovery_nats_url: secrets.QOVERY_NATS_URL.unwrap(),
qovery_nats_user: secrets.QOVERY_NATS_USERNAME.unwrap(),
qovery_nats_password: secrets.QOVERY_NATS_PASSWORD.unwrap(),
qovery_ssh_key: secrets.QOVERY_SSH_USER.unwrap(),
tls_email_report: secrets.LETS_ENCRYPT_EMAIL_REPORT.unwrap(),
}
}
}
pub fn clean_environments(

View File

@@ -1,19 +1,19 @@
use const_format::formatcp;
use qovery_engine::build_platform::Image;
use qovery_engine::cloud_provider::scaleway::application::Zone;
use qovery_engine::cloud_provider::scaleway::kubernetes::{Kapsule, KapsuleOptions};
use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions;
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR;
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::engine::Engine;
use qovery_engine::error::EngineError;
use qovery_engine::models::{Context, Environment, EnvironmentAction};
use qovery_engine::models::{Context, Environment};
use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use crate::cloudflare::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets};
use crate::common::Cluster;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::qovery::EngineLocation;
use tracing::error;
@@ -22,7 +22,10 @@ pub const SCW_QOVERY_ORGANIZATION_ID: &str = "zcf8e78e6";
pub const SCW_KUBE_TEST_CLUSTER_NAME: &str = "qovery-z093e29e2";
pub const SCW_KUBE_TEST_CLUSTER_ID: &str = "z093e29e2";
pub const SCW_TEST_ZONE: Zone = Zone::Paris2;
pub const SCW_KUBERNETES_VERSION: &str = "1.18";
pub const SCW_KUBERNETES_MAJOR_VERSION: u8 = 1;
pub const SCW_KUBERNETES_MINOR_VERSION: u8 = 18;
pub const SCW_KUBERNETES_VERSION: &'static str =
formatcp!("{}.{}", SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION);
pub const SCW_MANAGED_DATABASE_INSTANCE_TYPE: &str = "db-dev-s";
pub const SCW_MANAGED_DATABASE_DISK_TYPE: &str = "bssd";
pub const SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE: &str = "";
@@ -55,76 +58,104 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR {
)
}
pub fn cloud_provider_scaleway(context: &Context) -> Scaleway {
let secrets = FuncTestsSecrets::new();
impl Cluster<Scaleway, KapsuleOptions> for Scaleway {
fn docker_cr_engine(context: &Context) -> Engine {
// use Scaleway CR
let container_registry = Box::new(container_registry_scw(context));
Scaleway::new(
context.clone(),
SCW_KUBE_TEST_CLUSTER_ID,
SCW_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
SCW_KUBE_TEST_CLUSTER_NAME,
secrets
.SCALEWAY_ACCESS_KEY
.expect("SCALEWAY_ACCESS_KEY is not set in secrets")
.as_str(),
secrets
.SCALEWAY_SECRET_KEY
.expect("SCALEWAY_SECRET_KEY is not set in secrets")
.as_str(),
secrets
.SCALEWAY_DEFAULT_PROJECT_ID
.expect("SCALEWAY_DEFAULT_PROJECT_ID is not set in secrets")
.as_str(),
TerraformStateCredentials {
access_key_id: secrets
.TERRAFORM_AWS_ACCESS_KEY_ID
.expect("TERRAFORM_AWS_ACCESS_KEY_ID is not set in secrets"),
secret_access_key: secrets
.TERRAFORM_AWS_SECRET_ACCESS_KEY
.expect("TERRAFORM_AWS_SECRET_ACCESS_KEY is not set in secrets"),
region: "eu-west-3".to_string(),
},
)
}
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
pub fn scw_kubernetes_cluster_options(secrets: FuncTestsSecrets) -> KapsuleOptions {
KapsuleOptions::new(
secrets.QOVERY_API_URL.expect("QOVERY_API_URL is not set in secrets"),
secrets.QOVERY_GRPC_URL.expect("QOVERY_GRPC_URL is not set in secrets"),
secrets
.QOVERY_CLUSTER_SECRET_TOKEN
.expect("QOVERY_CLUSTER_SECRET_TOKEN is not set in secrets"),
secrets.QOVERY_NATS_URL.expect("QOVERY_NATS_URL is not set in secrets"),
secrets
.QOVERY_NATS_USERNAME
.expect("QOVERY_NATS_USERNAME is not set in secrets"),
secrets
.QOVERY_NATS_PASSWORD
.expect("QOVERY_NATS_PASSWORD is not set in secrets"),
secrets.QOVERY_SSH_USER.expect("QOVERY_SSH_USER is not set in secrets"),
"admin".to_string(),
"qovery".to_string(),
secrets
.QOVERY_AGENT_CONTROLLER_TOKEN
.expect("QOVERY_AGENT_CONTROLLER_TOKEN is not set in secrets"),
EngineLocation::ClientSide,
secrets
.QOVERY_ENGINE_CONTROLLER_TOKEN
.expect("QOVERY_ENGINE_CONTROLLER_TOKEN is not set in secrets"),
secrets
.SCALEWAY_DEFAULT_PROJECT_ID
.expect("SCALEWAY_DEFAULT_PROJECT_ID is not set in secrets"),
secrets
.SCALEWAY_ACCESS_KEY
.expect("SCALEWAY_ACCESS_KEY is not set in secrets"),
secrets
.SCALEWAY_SECRET_KEY
.expect("SCALEWAY_SECRET_KEY is not set in secrets"),
secrets
.LETS_ENCRYPT_EMAIL_REPORT
.expect("LETS_ENCRYPT_EMAIL_REPORT is not set in secrets"),
)
// use Scaleway
let cloud_provider = Scaleway::cloud_provider(context);
let dns_provider = Box::new(dns_provider_cloudflare(context));
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
/// Builds the Scaleway cloud-provider handle used by the tests, bound to the
/// fixed test cluster id/name and credentials read from `FuncTestsSecrets`.
/// Panics (`expect`) with an explicit message if any required secret is missing.
fn cloud_provider(context: &Context) -> Box<Scaleway> {
let secrets = FuncTestsSecrets::new();
Box::new(Scaleway::new(
context.clone(),
SCW_KUBE_TEST_CLUSTER_ID,
SCW_QOVERY_ORGANIZATION_ID,
uuid::Uuid::new_v4(),
SCW_KUBE_TEST_CLUSTER_NAME,
secrets
.SCALEWAY_ACCESS_KEY
.expect("SCALEWAY_ACCESS_KEY is not set in secrets")
.as_str(),
secrets
.SCALEWAY_SECRET_KEY
.expect("SCALEWAY_SECRET_KEY is not set in secrets")
.as_str(),
secrets
.SCALEWAY_DEFAULT_PROJECT_ID
.expect("SCALEWAY_DEFAULT_PROJECT_ID is not set in secrets")
.as_str(),
// Terraform state is stored on AWS regardless of the target provider.
TerraformStateCredentials {
access_key_id: secrets
.TERRAFORM_AWS_ACCESS_KEY_ID
.expect("TERRAFORM_AWS_ACCESS_KEY_ID is not set in secrets"),
secret_access_key: secrets
.TERRAFORM_AWS_SECRET_ACCESS_KEY
.expect("TERRAFORM_AWS_SECRET_ACCESS_KEY is not set in secrets"),
region: "eu-west-3".to_string(),
},
))
}
/// Node group used for Scaleway (Kapsule) test clusters: 5–10 `dev1-l` nodes.
fn kubernetes_nodes() -> Vec<NodeGroups> {
// Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L
vec![NodeGroups::new("groupscw0".to_string(), 5, 10, "dev1-l".to_string())
.expect("Problem while setup SCW nodes")]
}
/// Kapsule cluster options built from test secrets.
/// `_cluster_name` is unused for Scaleway (kept only to satisfy the
/// `Cluster` trait signature shared with the other providers).
/// Panics (`expect`) with an explicit message if any required secret is missing.
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option<String>) -> KapsuleOptions {
KapsuleOptions::new(
secrets.QOVERY_API_URL.expect("QOVERY_API_URL is not set in secrets"),
secrets.QOVERY_GRPC_URL.expect("QOVERY_GRPC_URL is not set in secrets"),
secrets
.QOVERY_CLUSTER_SECRET_TOKEN
.expect("QOVERY_CLUSTER_SECRET_TOKEN is not set in secrets"),
secrets.QOVERY_NATS_URL.expect("QOVERY_NATS_URL is not set in secrets"),
secrets
.QOVERY_NATS_USERNAME
.expect("QOVERY_NATS_USERNAME is not set in secrets"),
secrets
.QOVERY_NATS_PASSWORD
.expect("QOVERY_NATS_PASSWORD is not set in secrets"),
secrets.QOVERY_SSH_USER.expect("QOVERY_SSH_USER is not set in secrets"),
"admin".to_string(),
"qovery".to_string(),
secrets
.QOVERY_AGENT_CONTROLLER_TOKEN
.expect("QOVERY_AGENT_CONTROLLER_TOKEN is not set in secrets"),
EngineLocation::ClientSide,
secrets
.QOVERY_ENGINE_CONTROLLER_TOKEN
.expect("QOVERY_ENGINE_CONTROLLER_TOKEN is not set in secrets"),
secrets
.SCALEWAY_DEFAULT_PROJECT_ID
.expect("SCALEWAY_DEFAULT_PROJECT_ID is not set in secrets"),
secrets
.SCALEWAY_ACCESS_KEY
.expect("SCALEWAY_ACCESS_KEY is not set in secrets"),
secrets
.SCALEWAY_SECRET_KEY
.expect("SCALEWAY_SECRET_KEY is not set in secrets"),
secrets
.LETS_ENCRYPT_EMAIL_REPORT
.expect("LETS_ENCRYPT_EMAIL_REPORT is not set in secrets"),
)
}
}
pub fn scw_object_storage(context: Context, region: Zone) -> ScalewayOS {
@@ -147,107 +178,6 @@ pub fn scw_object_storage(context: Context, region: Zone) -> ScalewayOS {
)
}
pub fn scw_kubernetes_nodes() -> Vec<NodeGroups> {
// Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L
vec![NodeGroups::new("groupscw0".to_string(), 5, 10, "dev1-l".to_string()).expect("Problem while setup SCW nodes")]
}
pub fn docker_scw_cr_engine(context: &Context) -> Engine {
// use Scaleway CR
let container_registry = Box::new(container_registry_scw(context));
// use LocalDocker
let build_platform = Box::new(build_platform_local_docker(context));
// use Scaleway
let cloud_provider = Box::new(cloud_provider_scaleway(context));
let dns_provider = Box::new(dns_provider_cloudflare(context));
Engine::new(
context.clone(),
build_platform,
container_registry,
cloud_provider,
dns_provider,
)
}
pub fn scw_kubernetes_kapsule<'a>(
context: &Context,
cloud_provider: &'a Scaleway,
dns_provider: &'a dyn DnsProvider,
nodes_groups: Vec<NodeGroups>,
zone: Zone,
) -> Kapsule<'a> {
let secrets = FuncTestsSecrets::new();
Kapsule::<'a>::new(
context.clone(),
SCW_KUBE_TEST_CLUSTER_ID.to_string(),
uuid::Uuid::new_v4(),
SCW_KUBE_TEST_CLUSTER_NAME.to_string(),
SCW_KUBERNETES_VERSION.to_string(),
zone,
cloud_provider,
dns_provider,
nodes_groups,
scw_kubernetes_cluster_options(secrets),
)
.unwrap()
}
pub fn deploy_environment(context: &Context, environment_action: EnvironmentAction, zone: Zone) -> TransactionResult {
let engine = docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_scaleway(context);
let nodes = scw_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let kapsule = scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes, zone);
let _ = tx.deploy_environment_with_options(
&kapsule,
&environment_action,
DeploymentOption {
force_build: true,
force_push: true,
},
);
tx.commit()
}
pub fn delete_environment(context: &Context, environment_action: EnvironmentAction, zone: Zone) -> TransactionResult {
let engine = docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_scaleway(context);
let nodes = scw_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let kapsule = scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes, zone);
let _ = tx.delete_environment(&kapsule, &environment_action);
tx.commit()
}
pub fn pause_environment(context: &Context, environment_action: EnvironmentAction, zone: Zone) -> TransactionResult {
let engine = docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = cloud_provider_scaleway(context);
let nodes = scw_kubernetes_nodes();
let dns_provider = dns_provider_cloudflare(context);
let kapsule = scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes, zone);
let _ = tx.pause_environment(&kapsule, &environment_action);
tx.commit()
}
pub fn clean_environments(
context: &Context,
environments: Vec<Environment>,

View File

@@ -20,38 +20,39 @@ use retry::delay::Fibonacci;
use retry::OperationResult;
use std::env;
use std::fs;
use tracing::{error, info, span, warn, Level};
use tracing::{error, info, warn};
use tracing_subscriber;
use crate::scaleway::{
delete_environment as scw_delete, deploy_environment as scw_deploy, SCW_KUBE_TEST_CLUSTER_ID,
SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_SELF_HOSTED_DATABASE_DISK_TYPE,
SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, SCW_TEST_ZONE,
SCW_KUBE_TEST_CLUSTER_ID, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE,
SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
};
use hashicorp_vault;
use qovery_engine::build_platform::local_docker::LocalDocker;
use qovery_engine::cloud_provider::scaleway::application::Zone;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::cloud_provider::{CloudProvider, Kind};
use qovery_engine::cmd;
use qovery_engine::constants::{
AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID,
DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY,
};
use qovery_engine::error::{SimpleError, SimpleErrorKind};
use qovery_engine::models::{
Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Features, Metadata,
};
use qovery_engine::models::{Context, Database, DatabaseKind, DatabaseMode, Environment, Features, Metadata};
use serde::{Deserialize, Serialize};
extern crate time;
use crate::aws::{delete_environment as aws_delete, deploy_environment as aws_deploy, AWS_KUBE_TEST_CLUSTER_ID};
use crate::common::{Cluster, Infrastructure};
use crate::digitalocean::{
delete_environment as do_delete, deploy_environment as do_deploy, DO_KUBE_TEST_CLUSTER_ID,
DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_SELF_HOSTED_DATABASE_DISK_TYPE,
DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, DO_TEST_REGION,
DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
};
use qovery_engine::cloud_provider::digitalocean::application::Region;
use qovery_engine::cloud_provider::kubernetes::Kubernetes;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc};
use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, SVCItem, PVC, SVC};
use qovery_engine::dns_provider::DnsProvider;
use qovery_engine::models::DatabaseMode::MANAGED;
use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces};
use qovery_engine::object_storage::ObjectStorage;
@@ -929,14 +930,14 @@ pub fn db_fqnd(db: Database) -> String {
}
}
struct DBInfos {
db_port: u16,
db_name: String,
app_commit: String,
app_env_vars: BTreeMap<String, String>,
pub struct DBInfos {
pub db_port: u16,
pub db_name: String,
pub app_commit: String,
pub app_env_vars: BTreeMap<String, String>,
}
fn db_infos(
pub fn db_infos(
db_kind: DatabaseKind,
database_mode: DatabaseMode,
database_username: String,
@@ -1021,7 +1022,7 @@ fn db_infos(
}
}
fn db_disk_type(provider_kind: Kind, database_mode: DatabaseMode) -> String {
pub fn db_disk_type(provider_kind: Kind, database_mode: DatabaseMode) -> String {
match provider_kind {
Kind::Aws => "gp2",
Kind::Do => match database_mode {
@@ -1036,7 +1037,7 @@ fn db_disk_type(provider_kind: Kind, database_mode: DatabaseMode) -> String {
.to_string()
}
fn db_instance_type(provider_kind: Kind, db_kind: DatabaseKind, database_mode: DatabaseMode) -> String {
pub fn db_instance_type(provider_kind: Kind, db_kind: DatabaseKind, database_mode: DatabaseMode) -> String {
match provider_kind {
Kind::Aws => match db_kind {
DatabaseKind::Mongodb => "db.t3.medium",
@@ -1076,211 +1077,3 @@ pub fn get_svc_name(db_kind: DatabaseKind, provider_kind: Kind) -> &'static str
},
}
}
/// Full create → verify → delete lifecycle test for a database, shared by all
/// cloud providers.
///
/// Deploys `environment` augmented with a database of `db_kind` in
/// `database_mode` (optionally publicly accessible), checks the resulting
/// Kubernetes objects (PVC size for CONTAINER mode; service type and
/// external-dns annotations for MANAGED mode), then tears the environment
/// down with a separate execution context. Returns `test_name` so the caller
/// (typically `engine_run_test`) can report it.
pub fn test_db(
    context: Context,
    mut environment: Environment,
    secrets: FuncTestsSecrets,
    version: &str,
    test_name: &str,
    db_kind: DatabaseKind,
    provider_kind: Kind,
    database_mode: DatabaseMode,
    is_public: bool,
) -> String {
    init();

    let span = span!(Level::INFO, "test", name = test_name);
    let _enter = span.enter();

    let context_for_delete = context.clone_not_same_execution_id();

    let app_id = generate_id();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    let db_kind_str = db_kind.name().to_string();
    let database_host = format!(
        "{}-{}.{}",
        db_kind_str,
        generate_id(),
        secrets.clone().DEFAULT_TEST_DOMAIN.unwrap()
    );

    // Public databases are reached through their FQDN; private ones through the
    // in-cluster service name (MANAGED mode exposes an ExternalName "<id>-dns" service).
    let dyn_db_fqdn = match is_public {
        true => database_host.clone(),
        false => match database_mode.clone() {
            DatabaseMode::MANAGED => format!("{}-dns", app_id),
            DatabaseMode::CONTAINER => get_svc_name(db_kind.clone(), provider_kind.clone()).to_string(),
        },
    };

    let db_infos = db_infos(
        db_kind.clone(),
        database_mode.clone(),
        database_username.clone(),
        database_password.clone(),
        dyn_db_fqdn.clone(),
    );

    let database_port = db_infos.db_port;
    let database_db_name = db_infos.db_name.clone();
    let storage_size = 10;
    let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone());
    let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone());
    let db = Database {
        kind: db_kind.clone(),
        action: Action::Create,
        id: app_id.clone(),
        name: database_db_name.clone(),
        version: version.to_string(),
        fqdn_id: format!("{}-{}", db_kind_str, generate_id()),
        fqdn: database_host.clone(),
        port: database_port,
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "100m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: storage_size,
        database_instance_type: db_instance_type.to_string(),
        database_disk_type: db_disk_type.to_string(),
        activate_high_availability: false,
        activate_backups: false,
        publicly_accessible: is_public,
        mode: database_mode.clone(),
    };

    environment.databases = vec![db.clone()];

    // Point every application of the environment at the database under test.
    let app_name = format!("{}-app-{}", db_kind_str, generate_id());
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = app_name.clone();
            app.commit_id = db_infos.app_commit.clone();
            app.private_port = Some(1234);
            app.dockerfile_path = Some(format!("Dockerfile-{}", version));
            app.environment_vars = db_infos.app_env_vars.clone();
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = app_name.clone();

    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;

    let ea = EnvironmentAction::Environment(environment.clone());
    let ea_delete = EnvironmentAction::Environment(environment_delete);

    // Deploy on the requested provider; any rollback or unrecoverable error fails the test.
    match provider_kind {
        Kind::Aws => match aws_deploy(&context, ea) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
        Kind::Do => match do_deploy(&context, ea, DO_TEST_REGION) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
        Kind::Scw => match scw_deploy(&context, ea, SCW_TEST_ZONE) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
    }

    let kube_cluster_id = match provider_kind {
        Kind::Aws => AWS_KUBE_TEST_CLUSTER_ID,
        Kind::Do => DO_KUBE_TEST_CLUSTER_ID,
        Kind::Scw => SCW_KUBE_TEST_CLUSTER_ID,
    };

    match database_mode.clone() {
        DatabaseMode::CONTAINER => {
            // The container database must be backed by a PVC of the requested size.
            match get_pvc(provider_kind.clone(), kube_cluster_id, environment.clone(), secrets.clone()) {
                Ok(pvc) => assert_eq!(
                    pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
                    format!("{}Gi", storage_size)
                ),
                Err(_) => assert!(false),
            };

            // A LoadBalancer service must exist if and only if the database is public.
            match get_svc(provider_kind.clone(), kube_cluster_id, environment.clone(), secrets.clone()) {
                Ok(svc) => assert_eq!(
                    svc.items
                        .expect("No items in svc")
                        .into_iter()
                        .filter(|svc| svc
                            .metadata
                            .name
                            .contains(get_svc_name(db_kind.clone(), provider_kind.clone()))
                            && &svc.spec.svc_type == "LoadBalancer")
                        .collect::<Vec<SVCItem>>()
                        .len(),
                    match is_public {
                        true => 1,
                        false => 0,
                    }
                ),
                Err(_) => assert!(false),
            };
        }
        DatabaseMode::MANAGED => {
            // Managed databases expose exactly one ExternalName service; public
            // ones additionally carry the external-dns hostname annotation.
            match get_svc(provider_kind.clone(), kube_cluster_id, environment.clone(), secrets.clone()) {
                Ok(svc) => {
                    let service = svc
                        .items
                        .expect("No items in svc")
                        .into_iter()
                        .filter(|svc| {
                            svc.metadata.name.contains(format!("{}-dns", app_id).as_str())
                                && svc.spec.svc_type == "ExternalName"
                        })
                        .collect::<Vec<SVCItem>>();
                    let annotations = &service[0].metadata.annotations;
                    assert_eq!(service.len(), 1);
                    match is_public {
                        true => {
                            assert!(annotations.contains_key("external-dns.alpha.kubernetes.io/hostname"));
                            assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_host);
                        }
                        false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")),
                    }
                }
                Err(_) => assert!(false),
            };
        }
    }

    // Tear the environment down with the dedicated delete context.
    match provider_kind {
        Kind::Aws => match aws_delete(&context_for_delete, ea_delete) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
        Kind::Do => match do_delete(&context_for_delete, ea_delete, DO_TEST_REGION) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
        Kind::Scw => match scw_delete(&context_for_delete, ea_delete, SCW_TEST_ZONE) {
            TransactionResult::Ok => assert!(true),
            TransactionResult::Rollback(_) => assert!(false),
            TransactionResult::UnrecoverableError(_, _) => assert!(false),
        },
    }

    test_name.to_string()
}

View File

@@ -1,23 +1,21 @@
extern crate test_utilities;
use ::function_name::named;
use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind};
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{
Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction,
};
use qovery_engine::transaction::TransactionResult;
use test_utilities::utilities::{init, FuncTestsSecrets};
use tracing::{span, Level};
use crate::aws::aws_environment::{ctx_pause_environment, delete_environment, deploy_environment};
use self::test_utilities::aws::{
AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TYPE, AWS_KUBE_TEST_CLUSTER_ID, AWS_QOVERY_ORGANIZATION_ID,
};
use self::test_utilities::utilities::{
context, engine_run_test, generate_id, get_pods, get_svc_name, is_pod_restarted_env, test_db,
context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, FuncTestsSecrets,
};
use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED};
use test_utilities::common::{test_db, Infrastructure};
/**
**
@@ -55,15 +53,15 @@ fn deploy_an_environment_with_3_databases_and_3_apps() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -103,15 +101,15 @@ fn deploy_an_environment_with_db_and_pause_it() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match ctx_pause_environment(&context, &ea) {
match environment.pause_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -120,7 +118,7 @@ fn deploy_an_environment_with_db_and_pause_it() {
// Check that we have actually 0 pods running for this db
let app_name = format!("postgresql{}-0", environment.databases[0].name);
let ret = get_pods(
ProviderKind::Aws,
Kind::Aws,
environment.clone(),
app_name.clone().as_str(),
AWS_KUBE_TEST_CLUSTER_ID,
@@ -129,7 +127,7 @@ fn deploy_an_environment_with_db_and_pause_it() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -190,10 +188,10 @@ fn postgresql_failover_dev_environment_with_all_options() {
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment.clone());
let ea_fail_ok = EnvironmentAction::EnvironmentWithFailover(environment_never_up, environment.clone());
let ea_for_deletion = EnvironmentAction::Environment(environment_delete);
let ea_fail_ok = EnvironmentAction::EnvironmentWithFailover(environment_never_up.clone(), environment.clone());
let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -201,7 +199,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
let database_name = format!("postgresql{}-0", &environment_check.databases[0].name);
match is_pod_restarted_env(
ProviderKind::Aws,
Kind::Aws,
AWS_KUBE_TEST_CLUSTER_ID,
environment_check.clone(),
database_name.as_str(),
@@ -210,14 +208,14 @@ fn postgresql_failover_dev_environment_with_all_options() {
(true, _) => assert!(true),
(false, _) => assert!(false),
}
match deploy_environment(&context, &ea_fail_ok) {
match environment_never_up.deploy_environment(Kind::Aws, &context, &ea_fail_ok) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY EVEN IF FAIL
match is_pod_restarted_env(
ProviderKind::Aws,
Kind::Aws,
AWS_KUBE_TEST_CLUSTER_ID,
environment_check.clone(),
database_name.as_str(),
@@ -227,7 +225,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_deletion, &ea_for_deletion) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -277,10 +275,10 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() {
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_for_deletion = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -292,7 +290,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() {
assert_eq!(con, true);
}*/
match delete_environment(&context_for_deletion, &ea_for_deletion) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -377,19 +375,19 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
let environment_to_redeploy = environment.clone();
let environment_check = environment.clone();
let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy);
let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone());
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match deploy_environment(&context_for_redeploy, &ea_redeploy) {
match environment_to_redeploy.deploy_environment(Kind::Aws, &context_for_redeploy, &ea_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -397,7 +395,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
let database_name = format!("postgresql{}-0", &environment_check.databases[0].name);
match is_pod_restarted_env(
ProviderKind::Aws,
Kind::Aws,
AWS_KUBE_TEST_CLUSTER_ID,
environment_check,
database_name.as_str(),
@@ -407,7 +405,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_delete, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,73 +1,22 @@
extern crate test_utilities;
use self::test_utilities::aws::{AWS_KUBE_TEST_CLUSTER_ID, AWS_QOVERY_ORGANIZATION_ID};
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{engine_run_test, generate_id, get_pods, is_pod_restarted_env, FuncTestsSecrets};
use self::test_utilities::common::Infrastructure;
use self::test_utilities::utilities::{
engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, FuncTestsSecrets,
};
use ::function_name::named;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{Action, Clone2, Context, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::TransactionResult;
use std::collections::BTreeMap;
use test_utilities::utilities::context;
use test_utilities::utilities::init;
use test_utilities::utilities::{context, init};
use tracing::{span, Level};
// TODO:
// - Tests that applications are always restarted when recieving a CREATE action
// see: https://github.com/Qovery/engine/pull/269
/// Deploys the given environment on the shared AWS EKS test cluster, forcing
/// both image build and registry push, and returns the transaction outcome.
pub fn deploy_environment(context: &Context, environment_action: &EnvironmentAction) -> TransactionResult {
    // Assemble the EKS cluster the deployment targets.
    let provider = test_utilities::aws::cloud_provider_aws(&context);
    let dns = dns_provider_cloudflare(context);
    let cluster = test_utilities::aws::aws_kubernetes_eks(
        &context,
        &provider,
        &dns,
        test_utilities::aws::aws_kubernetes_nodes(),
    );

    // Run the deployment through a fresh engine session transaction.
    let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
    let session = engine.session().unwrap();
    let mut tx = session.transaction();
    let options = DeploymentOption {
        force_build: true,
        force_push: true,
    };
    let _ = tx.deploy_environment_with_options(&cluster, &environment_action, options);
    tx.commit()
}
/// Pauses the given environment on the shared AWS EKS test cluster and
/// returns the transaction outcome.
pub fn ctx_pause_environment(context: &Context, environment_action: &EnvironmentAction) -> TransactionResult {
    // Assemble the EKS cluster the environment runs on.
    let provider = test_utilities::aws::cloud_provider_aws(&context);
    let dns = dns_provider_cloudflare(context);
    let cluster = test_utilities::aws::aws_kubernetes_eks(
        &context,
        &provider,
        &dns,
        test_utilities::aws::aws_kubernetes_nodes(),
    );

    // Queue the pause on a fresh engine transaction and commit it.
    let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
    let session = engine.session().unwrap();
    let mut tx = session.transaction();
    let _ = tx.pause_environment(&cluster, &environment_action);
    tx.commit()
}
/// Deletes the given environment from the shared AWS EKS test cluster and
/// returns the transaction outcome.
pub fn delete_environment(context: &Context, environment_action: &EnvironmentAction) -> TransactionResult {
    // Assemble the EKS cluster the environment lives on.
    let provider = test_utilities::aws::cloud_provider_aws(&context);
    let dns = dns_provider_cloudflare(context);
    let cluster = test_utilities::aws::aws_kubernetes_eks(
        &context,
        &provider,
        &dns,
        test_utilities::aws::aws_kubernetes_nodes(),
    );

    // Queue the deletion on a fresh engine transaction and commit it.
    let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
    let session = engine.session().unwrap();
    let mut tx = session.transaction();
    let _ = tx.delete_environment(&cluster, &environment_action);
    tx.commit()
}
#[cfg(feature = "test-aws-self-hosted")]
#[named]
#[test]
@@ -95,16 +44,16 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() {
environment_for_delete.routers = vec![];
environment_for_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_for_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, &ea_delete) {
match environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -140,7 +89,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
let ea = EnvironmentAction::Environment(environment.clone());
let selector = format!("appId={}", environment.applications[0].id);
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -156,7 +105,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), false);
match ctx_pause_environment(&context_for_delete, &ea) {
match environment.pause_environment(Kind::Aws, &context_for_delete, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -175,7 +124,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
// Check we can resume the env
let ctx_resume = context.clone_not_same_execution_id();
match deploy_environment(&ctx_resume, &ea) {
match environment.deploy_environment(Kind::Aws, &ctx_resume, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -192,7 +141,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
assert_eq!(ret.unwrap().items.is_empty(), false);
// Cleanup
match delete_environment(&context_for_delete, &ea) {
match environment.delete_environment(Kind::Aws, &context_for_delete, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -228,16 +177,16 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
@@ -284,16 +233,16 @@ fn build_with_buildpacks_and_deploy_a_working_environment() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -340,16 +289,16 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -385,16 +334,16 @@ fn deploy_a_working_environment_with_domain() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -423,12 +372,13 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() {
&context,
AWS_QOVERY_ORGANIZATION_ID,
secrets
.clone()
.DEFAULT_TEST_DOMAIN
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -437,7 +387,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -448,18 +398,29 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the disk is here and with correct size
match get_pvc(
Kind::Aws,
AWS_KUBE_TEST_CLUSTER_ID,
environment.clone(),
secrets.clone(),
) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -496,7 +457,7 @@ fn redeploy_same_app_with_ebs() {
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -505,7 +466,7 @@ fn redeploy_same_app_with_ebs() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -518,15 +479,29 @@ fn redeploy_same_app_with_ebs() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea2 = EnvironmentAction::Environment(environment_redeploy);
let ea_delete = EnvironmentAction::Environment(environment_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea2 = EnvironmentAction::Environment(environment_redeploy.clone());
let ea_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match get_pvc(
Kind::Aws,
AWS_KUBE_TEST_CLUSTER_ID,
environment.clone(),
secrets.clone(),
) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
let app_name = format!("{}-0", &environment_check1.applications[0].name);
let (_, number) = is_pod_restarted_env(
Kind::Aws,
@@ -536,7 +511,7 @@ fn redeploy_same_app_with_ebs() {
secrets.clone(),
);
match deploy_environment(&context_bis, &ea2) {
match environment_redeploy.deploy_environment(Kind::Aws, &context_bis, &ea2) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -551,7 +526,7 @@ fn redeploy_same_app_with_ebs() {
);
//nothing change in the app, so, it shouldn't be restarted
assert!(number.eq(&number2));
match delete_environment(&context_for_deletion, &ea_delete) {
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -603,21 +578,21 @@ fn deploy_a_not_working_environment_and_after_working_environment() {
environment_for_delete.action = Action::Delete;
// environment actions
let ea = EnvironmentAction::Environment(environment);
let ea_not_working = EnvironmentAction::Environment(environment_for_not_working);
let ea_delete = EnvironmentAction::Environment(environment_for_delete);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_not_working = EnvironmentAction::Environment(environment_for_not_working.clone());
let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context_for_not_working, &ea_not_working) {
match environment_for_not_working.deploy_environment(Kind::Aws, &context_for_not_working, &ea_not_working) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, &ea_delete) {
match environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -677,40 +652,40 @@ fn deploy_ok_fail_fail_ok_environment() {
let mut delete_env = environment.clone();
delete_env.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_not_working_1 = EnvironmentAction::Environment(not_working_env_1);
let ea_not_working_2 = EnvironmentAction::Environment(not_working_env_2);
let ea_delete = EnvironmentAction::Environment(delete_env);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone());
let ea_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone());
let ea_delete = EnvironmentAction::Environment(delete_env.clone());
// OK
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// FAIL and rollback
match deploy_environment(&context_for_not_working_1, &ea_not_working_1) {
match not_working_env_1.deploy_environment(Kind::Aws, &context_for_not_working_1, &ea_not_working_1) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// FAIL and Rollback again
match deploy_environment(&context_for_not_working_2, &ea_not_working_2) {
match not_working_env_2.deploy_environment(Kind::Aws, &context_for_not_working_2, &ea_not_working_2) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// Should be working
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, &ea_delete) {
match delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -746,15 +721,15 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() {
let mut delete_env = environment.clone();
delete_env.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(delete_env);
let ea = EnvironmentAction::Environment(environment.clone());
let ea_delete = EnvironmentAction::Environment(delete_env.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, &ea_delete) {
match delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -798,15 +773,15 @@ fn deploy_a_non_working_environment_with_a_working_failover_on_aws_eks() {
test_domain.as_str(),
);
delete_env.action = Action::Delete;
let ea_delete = EnvironmentAction::Environment(delete_env);
let ea = EnvironmentAction::EnvironmentWithFailover(environment, failover_environment);
let ea_delete = EnvironmentAction::Environment(delete_env.clone());
let ea = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_deletion, &ea_delete) {
match delete_env.delete_environment(Kind::Aws, &context_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -875,25 +850,25 @@ fn deploy_2_non_working_environments_with_2_working_failovers_on_aws_eks() {
test_domain.as_str(),
);
delete_env.action = Action::Delete;
let ea_delete = EnvironmentAction::Environment(delete_env);
let ea_delete = EnvironmentAction::Environment(delete_env.clone());
// first deployement
let ea1 = EnvironmentAction::EnvironmentWithFailover(fail_app_1, failover_environment_1);
let ea2 = EnvironmentAction::EnvironmentWithFailover(fail_app_2, failover_environment_2);
let ea1 = EnvironmentAction::EnvironmentWithFailover(fail_app_1, failover_environment_1.clone());
let ea2 = EnvironmentAction::EnvironmentWithFailover(fail_app_2, failover_environment_2.clone());
match deploy_environment(&context_failover_1, &ea1) {
match failover_environment_1.deploy_environment(Kind::Aws, &context_failover_1, &ea1) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match deploy_environment(&context_failover_2, &ea2) {
match failover_environment_2.deploy_environment(Kind::Aws, &context_failover_2, &ea2) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_deletion, &ea_delete) {
match delete_env.delete_environment(Kind::Aws, &context_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -931,15 +906,15 @@ fn deploy_a_non_working_environment_with_a_non_working_failover_on_aws_eks() {
);
delete_env.action = Action::Delete;
// environment action initialize
let ea_delete = EnvironmentAction::Environment(delete_env);
let ea = EnvironmentAction::EnvironmentWithFailover(environment, failover_environment);
let ea_delete = EnvironmentAction::Environment(delete_env.clone());
let ea = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, &ea) {
match environment.deploy_environment(Kind::Aws, &context, &ea) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_deletion, &ea_delete) {
match delete_env.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,183 +1,34 @@
extern crate test_utilities;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, init, FuncTestsSecrets};
use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION};
use self::test_utilities::utilities::{engine_run_test, FuncTestsSecrets};
use ::function_name::named;
use tracing::{span, Level};
use self::test_utilities::aws::eks_options;
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode;
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways};
use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS};
use qovery_engine::transaction::TransactionResult;
#[allow(dead_code)]
fn create_upgrade_and_destroy_eks_cluster(
region: &str,
secrets: FuncTestsSecrets,
boot_version: &str,
upgrade_to_version: &str,
test_name: &str,
) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let aws = test_utilities::aws::cloud_provider_aws(&context);
let nodes = test_utilities::aws::aws_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = EKS::new(
context.clone(),
generate_cluster_id(region).as_str(),
uuid::Uuid::new_v4(),
generate_cluster_id(region).as_str(),
boot_version,
region,
&aws,
&cloudflare,
eks_options(secrets.clone()),
nodes.clone(),
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Upgrade
let kubernetes = EKS::new(
context,
generate_cluster_id(region).as_str(),
uuid::Uuid::new_v4(),
generate_cluster_id(region).as_str(),
upgrade_to_version,
region,
&aws,
&cloudflare,
eks_options(secrets),
nodes,
)
.unwrap();
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
})
}
use qovery_engine::cloud_provider::Kind;
use test_utilities::common::{cluster_test, ClusterTestType};
#[cfg(feature = "test-aws-infra")]
fn create_and_destroy_eks_cluster(
region: &str,
secrets: FuncTestsSecrets,
test_infra_pause: bool,
test_type: ClusterTestType,
major_boot_version: u8,
minor_boot_version: u8,
vpc_network_mode: VpcQoveryNetworkMode,
test_name: &str,
) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::aws::docker_ecr_aws_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let aws = test_utilities::aws::cloud_provider_aws(&context);
let nodes = test_utilities::aws::aws_kubernetes_nodes();
let mut eks_options = eks_options(secrets);
eks_options.vpc_qovery_network_mode = vpc_network_mode;
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = EKS::new(
context,
generate_cluster_id(region).as_str(),
uuid::Uuid::new_v4(),
generate_cluster_id(region).as_str(),
test_utilities::aws::AWS_KUBERNETES_VERSION,
cluster_test(
test_name,
Kind::Aws,
region,
&aws,
&cloudflare,
eks_options,
nodes,
secrets,
test_type,
major_boot_version,
minor_boot_version,
Option::from(vpc_network_mode),
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
if test_infra_pause {
// Pause
if let Err(err) = tx.pause_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Resume
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
}
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
})
}
@@ -192,7 +43,15 @@ fn create_and_destroy_eks_cluster(
fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() {
let region = "eu-west-3";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, false, WithoutNatGateways, function_name!());
create_and_destroy_eks_cluster(
&region,
secrets,
ClusterTestType::Classic,
AWS_KUBERNETES_MAJOR_VERSION,
AWS_KUBERNETES_MINOR_VERSION,
WithoutNatGateways,
function_name!(),
);
}
#[cfg(feature = "test-aws-infra")]
@@ -201,7 +60,15 @@ fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() {
fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() {
let region = "eu-west-3";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, false, WithNatGateways, function_name!());
create_and_destroy_eks_cluster(
&region,
secrets,
ClusterTestType::Classic,
AWS_KUBERNETES_MAJOR_VERSION,
AWS_KUBERNETES_MINOR_VERSION,
WithNatGateways,
function_name!(),
);
}
#[cfg(feature = "test-aws-infra")]
@@ -210,16 +77,33 @@ fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() {
fn create_and_destroy_eks_cluster_in_us_east_2() {
let region = "us-east-2";
let secrets = FuncTestsSecrets::new();
create_and_destroy_eks_cluster(&region, secrets, true, WithoutNatGateways, function_name!());
create_and_destroy_eks_cluster(
&region,
secrets,
ClusterTestType::Classic,
AWS_KUBERNETES_MAJOR_VERSION,
AWS_KUBERNETES_MINOR_VERSION,
WithoutNatGateways,
function_name!(),
);
}
// only enable this test manually when we want to perform and validate upgrade process
#[allow(dead_code)]
#[cfg(feature = "test-aws-infra")]
#[named]
#[test]
#[ignore]
fn create_upgrade_and_destroy_eks_cluster_in_eu_west_3() {
let region = "eu-west-3";
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_eks_cluster(&region, secrets, "1.18", "1.19", function_name!());
create_and_destroy_eks_cluster(
&region,
secrets,
ClusterTestType::WithUpgrade,
AWS_KUBERNETES_MAJOR_VERSION,
AWS_KUBERNETES_MINOR_VERSION,
WithoutNatGateways,
function_name!(),
);
}

View File

@@ -7,16 +7,15 @@ use qovery_engine::models::{
};
use qovery_engine::transaction::TransactionResult;
use test_utilities::utilities::{
context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, test_db,
FuncTestsSecrets,
context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, FuncTestsSecrets,
};
use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED};
use test_utilities::common::working_minimal_environment;
use test_utilities::common::{test_db, working_minimal_environment, Infrastructure};
use test_utilities::digitalocean::{
clean_environments, delete_environment, deploy_environment, pause_environment, DO_KUBE_TEST_CLUSTER_ID,
DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_QOVERY_ORGANIZATION_ID,
DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, DO_TEST_REGION,
clean_environments, DO_KUBE_TEST_CLUSTER_ID, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE,
DO_QOVERY_ORGANIZATION_ID, DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
DO_TEST_REGION,
};
/**
@@ -56,15 +55,15 @@ fn deploy_an_environment_with_3_databases_and_3_apps() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, env_action_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -109,15 +108,15 @@ fn deploy_an_environment_with_db_and_pause_it() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action.clone(), DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action.clone()) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match pause_environment(&context, env_action, DO_TEST_REGION) {
match environment.pause_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -135,7 +134,7 @@ fn deploy_an_environment_with_db_and_pause_it() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
match delete_environment(&context_for_deletion, env_action_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -201,10 +200,11 @@ fn postgresql_failover_dev_environment_with_all_options() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_fail_ok = EnvironmentAction::EnvironmentWithFailover(environment_never_up, environment.clone());
let env_action_fail_ok =
EnvironmentAction::EnvironmentWithFailover(environment_never_up.clone(), environment.clone());
let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -221,7 +221,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
(true, _) => assert!(true),
(false, _) => assert!(false),
}
match deploy_environment(&context, env_action_fail_ok, DO_TEST_REGION) {
match environment_never_up.deploy_environment(Kind::Do, &context, &env_action_fail_ok) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -238,7 +238,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_deletion, env_action_for_deletion, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -296,7 +296,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() {
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -308,7 +308,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() {
assert_eq!(con, true);
}*/
match delete_environment(&context_for_deletion, env_action_for_deletion, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -354,7 +354,6 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
.as_str(),
);
let database_mode = CONTAINER;
let app_name = format!("postgresql-app-{}", generate_id());
@@ -415,19 +414,19 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
let environment_to_redeploy = environment.clone();
let environment_check = environment.clone();
let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy);
let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone());
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match deploy_environment(&context_for_redeploy, env_action_redeploy, DO_TEST_REGION) {
match environment_to_redeploy.deploy_environment(Kind::Do, &context_for_redeploy, &env_action_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -445,7 +444,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_delete, env_action_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,17 +1,17 @@
extern crate test_utilities;
use self::test_utilities::digitalocean::{
clean_environments, delete_environment, deploy_environment, pause_environment, DO_KUBE_TEST_CLUSTER_ID,
DO_QOVERY_ORGANIZATION_ID, DO_TEST_REGION,
clean_environments, DO_KUBE_TEST_CLUSTER_ID, DO_QOVERY_ORGANIZATION_ID, DO_TEST_REGION,
};
use self::test_utilities::utilities::{
engine_run_test, generate_id, get_pods, init, is_pod_restarted_env, FuncTestsSecrets,
engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, FuncTestsSecrets,
};
use ::function_name::named;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::TransactionResult;
use std::collections::BTreeMap;
use test_utilities::common::Infrastructure;
use test_utilities::utilities::context;
use tracing::{span, warn, Level};
@@ -48,15 +48,15 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, DO_TEST_REGION) {
match environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -100,15 +100,15 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, env_action_for_delete, DO_TEST_REGION) {
match environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
@@ -149,7 +149,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() {
let env_action = EnvironmentAction::Environment(environment.clone());
let selector = format!("appId={}", environment.applications[0].id);
match deploy_environment(&context, env_action.clone(), DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -165,7 +165,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), false);
match pause_environment(&context_for_delete, env_action.clone(), DO_TEST_REGION) {
match environment.pause_environment(Kind::Do, &context_for_delete, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -184,7 +184,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() {
// Check we can resume the env
let ctx_resume = context.clone_not_same_execution_id();
match deploy_environment(&ctx_resume, env_action.clone(), DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &ctx_resume, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -201,7 +201,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() {
assert_eq!(ret.unwrap().items.is_empty(), false);
// Cleanup
match delete_environment(&context_for_delete, env_action.clone(), DO_TEST_REGION) {
match environment.delete_environment(Kind::Do, &context_for_delete, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -254,15 +254,15 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, DO_TEST_REGION) {
match environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -304,15 +304,15 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -351,7 +351,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() {
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -360,7 +360,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -372,17 +372,23 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TODO(benjaminch): check the disk is here and with correct size, can use DigitalOcean API
match get_pvc(Kind::Do, DO_KUBE_TEST_CLUSTER_ID, environment.clone(), secrets.clone()) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
match delete_environment(&context_for_deletion, env_action_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -422,7 +428,7 @@ fn digitalocean_doks_redeploy_same_app() {
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -431,7 +437,7 @@ fn digitalocean_doks_redeploy_same_app() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -446,15 +452,23 @@ fn digitalocean_doks_redeploy_same_app() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy);
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match get_pvc(Kind::Do, DO_KUBE_TEST_CLUSTER_ID, environment.clone(), secrets.clone()) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
let app_name = format!("{}-0", &environment_check1.applications[0].name);
let (_, number) = is_pod_restarted_env(
Kind::Do,
@@ -464,7 +478,7 @@ fn digitalocean_doks_redeploy_same_app() {
secrets.clone(),
);
match deploy_environment(&context_bis, env_action_redeploy, DO_TEST_REGION) {
match environment_redeploy.deploy_environment(Kind::Do, &context_bis, &env_action_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -481,7 +495,7 @@ fn digitalocean_doks_redeploy_same_app() {
// nothing changed in the app, so, it shouldn't be restarted
assert!(number.eq(&number2));
match delete_environment(&context_for_deletion, env_action_delete, DO_TEST_REGION) {
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -540,20 +554,24 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme
// environment actions
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working);
let env_action_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone());
let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context_for_not_working, env_action_not_working, DO_TEST_REGION) {
match environment_for_not_working.deploy_environment(
Kind::Do,
&context_for_not_working,
&env_action_not_working,
) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_delete, DO_TEST_REGION) {
match environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -617,39 +635,39 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() {
delete_env.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1);
let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2);
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone());
let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone());
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
// OK
match deploy_environment(&context, env_action.clone(), DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// FAIL and rollback
match deploy_environment(&context_for_not_working_1, env_action_not_working_1, DO_TEST_REGION) {
match not_working_env_1.deploy_environment(Kind::Do, &context_for_not_working_1, &env_action_not_working_1) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// FAIL and Rollback again
match deploy_environment(&context_for_not_working_2, env_action_not_working_2, DO_TEST_REGION) {
match not_working_env_2.deploy_environment(Kind::Do, &context_for_not_working_2, &env_action_not_working_2) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// Should be working
match deploy_environment(&context, env_action.clone(), DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_delete, DO_TEST_REGION) {
match delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -691,15 +709,15 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() {
delete_env.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, env_action_delete, DO_TEST_REGION) {
match delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -749,16 +767,16 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_a_working_failover()
);
delete_env.action = Action::Delete;
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
let env_action = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_deletion, env_action_delete, DO_TEST_REGION) {
match delete_env.delete_environment(Kind::Do, &context_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -809,16 +827,16 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_a_non_working_failove
delete_env.action = Action::Delete;
// environment action initialize
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
let env_action = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, env_action, DO_TEST_REGION) {
match environment.deploy_environment(Kind::Do, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_deletion, env_action_delete, DO_TEST_REGION) {
match delete_env.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,168 +1,33 @@
extern crate test_utilities;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, init, FuncTestsSecrets};
use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION};
use self::test_utilities::utilities::{engine_run_test, FuncTestsSecrets};
use ::function_name::named;
use tracing::{span, Level};
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::transaction::TransactionResult;
use qovery_engine::cloud_provider::digitalocean::application::Region;
use test_utilities::digitalocean::DO_KUBERNETES_VERSION;
use qovery_engine::cloud_provider::Kind;
use test_utilities::common::{cluster_test, ClusterTestType};
#[allow(dead_code)]
fn create_upgrade_and_destroy_doks_cluster(
#[cfg(feature = "test-do-infra")]
fn create_and_destroy_doks_cluster(
region: Region,
secrets: FuncTestsSecrets,
boot_version: &str,
_upgrade_to_version: &str,
test_type: ClusterTestType,
major_boot_version: u8,
minor_boot_version: u8,
test_name: &str,
) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::digitalocean::docker_cr_do_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let do_cluster = test_utilities::digitalocean::cloud_provider_digitalocean(&context);
let nodes = test_utilities::digitalocean::do_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let cluster_id = generate_cluster_id(region.as_str());
let kubernetes = DOKS::new(
context,
cluster_id.clone(),
uuid::Uuid::new_v4(),
cluster_id.clone(),
boot_version.to_string(),
region,
&do_cluster,
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(secrets, cluster_id),
cluster_test(
test_name,
Kind::Do,
region.as_str(),
secrets,
test_type,
major_boot_version,
minor_boot_version,
None,
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Upgrade
// TODO(benjaminch): To be added
//let kubernetes = ...
// if let Err(err) = tx.create_kubernetes(&kubernetes) {
// panic!("{:?}", err)
// }
// let _ = match tx.commit() {
// TransactionResult::Ok => assert!(true),
// TransactionResult::Rollback(_) => assert!(false),
// TransactionResult::UnrecoverableError(_, _) => assert!(false),
// };
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
});
}
#[allow(dead_code)]
fn create_and_destroy_doks_cluster(region: Region, secrets: FuncTestsSecrets, test_infra_pause: bool, test_name: &str) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::digitalocean::docker_cr_do_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let do_cluster = test_utilities::digitalocean::cloud_provider_digitalocean(&context);
let nodes = test_utilities::digitalocean::do_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let cluster_id = generate_cluster_id(region.as_str());
let kubernetes = DOKS::new(
context,
cluster_id.clone(),
uuid::Uuid::new_v4(),
cluster_id.clone(),
DO_KUBERNETES_VERSION.to_string(),
region,
&do_cluster,
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(secrets, cluster_id),
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
if test_infra_pause {
// Pause
if let Err(err) = tx.pause_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Resume
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
}
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
});
})
}
#[cfg(feature = "test-do-infra")]
@@ -171,14 +36,29 @@ fn create_and_destroy_doks_cluster(region: Region, secrets: FuncTestsSecrets, te
fn create_and_destroy_doks_cluster_ams_3() {
let region = Region::Amsterdam3;
let secrets = FuncTestsSecrets::new();
create_and_destroy_doks_cluster(region, secrets, false, function_name!());
create_and_destroy_doks_cluster(
region,
secrets,
ClusterTestType::Classic,
DO_KUBERNETES_MAJOR_VERSION,
DO_KUBERNETES_MINOR_VERSION,
function_name!(),
);
}
#[cfg(feature = "test-do-infra")]
#[named]
#[test]
#[ignore]
fn create_upgrade_and_destroy_doks_cluster_in_nyc_3() {
let region = Region::NewYorkCity3;
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_doks_cluster(region, secrets, "1.19", "1.20", function_name!());
create_and_destroy_doks_cluster(
region,
secrets,
ClusterTestType::Classic,
DO_KUBERNETES_MAJOR_VERSION,
DO_KUBERNETES_MINOR_VERSION,
function_name!(),
);
}

View File

@@ -3,8 +3,10 @@ extern crate test_utilities;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{context, engine_run_test, init, FuncTestsSecrets};
use ::function_name::named;
use qovery_engine::cloud_provider::digitalocean::DO;
use tracing::{span, Level};
use self::test_utilities::common::Cluster;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::transaction::TransactionResult;
@@ -26,12 +28,12 @@ fn create_digitalocean_kubernetes_doks_test_cluster() {
let _enter = span.enter();
let context = context();
let engine = test_utilities::digitalocean::docker_cr_do_engine(&context);
let engine = DO::docker_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let do_cluster = test_utilities::digitalocean::cloud_provider_digitalocean(&context);
let nodes = test_utilities::digitalocean::do_kubernetes_nodes();
let do_cluster = DO::cloud_provider(&context);
let nodes = DO::kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = DOKS::new(
@@ -41,12 +43,12 @@ fn create_digitalocean_kubernetes_doks_test_cluster() {
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
test_utilities::digitalocean::DO_KUBERNETES_VERSION.to_string(),
test_utilities::digitalocean::DO_TEST_REGION,
&do_cluster,
do_cluster.as_ref(),
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(
DO::kubernetes_cluster_options(
secrets,
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
Option::from(test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string()),
),
)
.unwrap();
@@ -83,12 +85,12 @@ fn destroy_digitalocean_kubernetes_doks_test_cluster() {
let _enter = span.enter();
let context = context();
let engine = test_utilities::digitalocean::docker_cr_do_engine(&context);
let engine = DO::docker_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let do_cluster = test_utilities::digitalocean::cloud_provider_digitalocean(&context);
let nodes = test_utilities::digitalocean::do_kubernetes_nodes();
let do_cluster = DO::cloud_provider(&context);
let nodes = DO::kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = DOKS::new(
@@ -98,12 +100,12 @@ fn destroy_digitalocean_kubernetes_doks_test_cluster() {
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
test_utilities::digitalocean::DO_KUBERNETES_VERSION.to_string(),
test_utilities::digitalocean::DO_TEST_REGION,
&do_cluster,
do_cluster.as_ref(),
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(
DO::kubernetes_cluster_options(
secrets,
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
Option::from(test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string()),
),
)
.unwrap();

View File

@@ -8,15 +8,16 @@ use qovery_engine::models::{
use qovery_engine::transaction::TransactionResult;
use test_utilities::utilities::{
context, engine_run_test, generate_id, generate_password, get_pods, get_svc_name, init, is_pod_restarted_env,
test_db, FuncTestsSecrets,
FuncTestsSecrets,
};
use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED};
use test_utilities::common::working_minimal_environment;
use test_utilities::common::Infrastructure;
use test_utilities::common::{test_db, working_minimal_environment};
use test_utilities::scaleway::{
clean_environments, delete_environment, deploy_environment, pause_environment, SCW_KUBE_TEST_CLUSTER_ID,
SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_QOVERY_ORGANIZATION_ID,
SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, SCW_TEST_ZONE,
clean_environments, SCW_KUBE_TEST_CLUSTER_ID, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE,
SCW_QOVERY_ORGANIZATION_ID, SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
SCW_TEST_ZONE,
};
/**
@@ -56,15 +57,15 @@ fn deploy_an_environment_with_3_databases_and_3_apps() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, env_action_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -109,15 +110,15 @@ fn deploy_an_environment_with_db_and_pause_it() {
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action.clone(), SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match pause_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.pause_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -135,7 +136,7 @@ fn deploy_an_environment_with_db_and_pause_it() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
match delete_environment(&context_for_deletion, env_action_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -204,7 +205,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
let env_action_fail_ok = EnvironmentAction::EnvironmentWithFailover(environment_never_up, environment.clone());
let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -221,7 +222,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
(true, _) => assert!(true),
(false, _) => assert!(false),
}
match deploy_environment(&context, env_action_fail_ok, SCW_TEST_ZONE) {
match environment_check.deploy_environment(Kind::Scw, &context, &env_action_fail_ok) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -238,7 +239,7 @@ fn postgresql_failover_dev_environment_with_all_options() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_deletion, env_action_for_deletion, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -295,13 +296,13 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() {
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_deletion, env_action_for_deletion, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_for_deletion) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -409,19 +410,19 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
let environment_to_redeploy = environment.clone();
let environment_check = environment.clone();
let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy);
let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone());
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match deploy_environment(&context_for_redeploy, env_action_redeploy, SCW_TEST_ZONE) {
match environment_to_redeploy.deploy_environment(Kind::Scw, &context_for_redeploy, &env_action_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -439,7 +440,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
(false, _) => assert!(false),
}
match delete_environment(&context_for_delete, env_action_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,18 +1,17 @@
extern crate test_utilities;
use self::test_utilities::scaleway::{
clean_environments, delete_environment, deploy_environment, pause_environment, SCW_KUBE_TEST_CLUSTER_ID,
SCW_QOVERY_ORGANIZATION_ID, SCW_TEST_ZONE,
clean_environments, SCW_KUBE_TEST_CLUSTER_ID, SCW_QOVERY_ORGANIZATION_ID, SCW_TEST_ZONE,
};
use self::test_utilities::utilities::{
engine_run_test, generate_id, get_pods, init, is_pod_restarted_env, FuncTestsSecrets,
context, engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, FuncTestsSecrets,
};
use ::function_name::named;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::TransactionResult;
use std::collections::BTreeMap;
use test_utilities::utilities::context;
use test_utilities::common::Infrastructure;
use tracing::{span, warn, Level};
// Note: All those tests relies on a test cluster running on Scaleway infrastructure.
@@ -48,15 +47,15 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, SCW_TEST_ZONE) {
match environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -100,15 +99,15 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, env_action_for_delete, SCW_TEST_ZONE) {
match environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
@@ -149,7 +148,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() {
let env_action = EnvironmentAction::Environment(environment.clone());
let selector = format!("appId={}", environment.applications[0].id);
match deploy_environment(&context, env_action.clone(), SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -165,7 +164,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() {
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), false);
match pause_environment(&context_for_delete, env_action.clone(), SCW_TEST_ZONE) {
match environment.pause_environment(Kind::Scw, &context_for_delete, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -184,7 +183,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() {
// Check we can resume the env
let ctx_resume = context.clone_not_same_execution_id();
match deploy_environment(&ctx_resume, env_action.clone(), SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &ctx_resume, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -201,7 +200,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() {
assert_eq!(ret.unwrap().items.is_empty(), false);
// Cleanup
match delete_environment(&context_for_delete, env_action.clone(), SCW_TEST_ZONE) {
match environment.delete_environment(Kind::Scw, &context_for_delete, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -254,15 +253,15 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() {
environment_for_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, SCW_TEST_ZONE) {
match environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -304,15 +303,15 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_for_delete = EnvironmentAction::Environment(environment_delete);
let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_for_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -351,7 +350,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() {
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -360,7 +359,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -372,17 +371,28 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TODO(benjaminch): check the disk is here and with correct size, can use Scaleway API
match get_pvc(
Kind::Scw,
SCW_KUBE_TEST_CLUSTER_ID,
environment.clone(),
secrets.clone(),
) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
match delete_environment(&context_for_deletion, env_action_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -416,13 +426,14 @@ fn scaleway_kapsule_redeploy_same_app() {
&context,
SCW_QOVERY_ORGANIZATION_ID,
secrets
.clone()
.DEFAULT_TEST_DOMAIN
.as_ref()
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
.as_str(),
);
// Todo: make an image that check there is a mounted disk
let storage_size: u16 = 10;
environment.applications = environment
.applications
.into_iter()
@@ -431,7 +442,7 @@ fn scaleway_kapsule_redeploy_same_app() {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
size_in_gib: storage_size,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
@@ -446,15 +457,28 @@ fn scaleway_kapsule_redeploy_same_app() {
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy);
let env_action_delete = EnvironmentAction::Environment(environment_delete);
let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone());
let env_action_delete = EnvironmentAction::Environment(environment_delete.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match get_pvc(
Kind::Scw,
SCW_KUBE_TEST_CLUSTER_ID,
environment.clone(),
secrets.clone(),
) {
Ok(pvc) => assert_eq!(
pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage,
format!("{}Gi", storage_size)
),
Err(_) => assert!(false),
};
let app_name = format!("{}-0", &environment_check1.applications[0].name);
let (_, number) = is_pod_restarted_env(
Kind::Scw,
@@ -464,7 +488,7 @@ fn scaleway_kapsule_redeploy_same_app() {
secrets.clone(),
);
match deploy_environment(&context_bis, env_action_redeploy, SCW_TEST_ZONE) {
match environment_redeploy.deploy_environment(Kind::Scw, &context_bis, &env_action_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -481,7 +505,7 @@ fn scaleway_kapsule_redeploy_same_app() {
// nothing changed in the app, so, it shouldn't be restarted
assert!(number.eq(&number2));
match delete_environment(&context_for_deletion, env_action_delete, SCW_TEST_ZONE) {
match environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -540,20 +564,24 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen
// environment actions
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working);
let env_action_delete = EnvironmentAction::Environment(environment_for_delete);
let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone());
let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone());
match deploy_environment(&context_for_not_working, env_action_not_working, SCW_TEST_ZONE) {
match environment_for_not_working.deploy_environment(
Kind::Scw,
&context_for_not_working,
&env_action_not_working,
) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_delete, SCW_TEST_ZONE) {
match environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -617,39 +645,39 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() {
delete_env.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1);
let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2);
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone());
let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone());
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
// OK
match deploy_environment(&context, env_action.clone(), SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// FAIL and rollback
match deploy_environment(&context_for_not_working_1, env_action_not_working_1, SCW_TEST_ZONE) {
match not_working_env_1.deploy_environment(Kind::Scw, &context_for_not_working_1, &env_action_not_working_1) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// FAIL and Rollback again
match deploy_environment(&context_for_not_working_2, env_action_not_working_2, SCW_TEST_ZONE) {
match not_working_env_2.deploy_environment(Kind::Scw, &context_for_not_working_2, &env_action_not_working_2) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(true),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
// Should be working
match deploy_environment(&context, env_action.clone(), SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, env_action_delete, SCW_TEST_ZONE) {
match delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -691,15 +719,15 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() {
delete_env.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment.clone());
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, env_action_delete, SCW_TEST_ZONE) {
match delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -749,16 +777,16 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_a_working_failover() {
);
delete_env.action = Action::Delete;
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
let env_action = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_deletion, env_action_delete, SCW_TEST_ZONE) {
match delete_env.delete_environment(Kind::Scw, &context_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
@@ -809,16 +837,16 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_a_non_working_failover
delete_env.action = Action::Delete;
// environment action initialize
let env_action_delete = EnvironmentAction::Environment(delete_env);
let env_action_delete = EnvironmentAction::Environment(delete_env.clone());
let env_action = EnvironmentAction::EnvironmentWithFailover(environment.clone(), failover_environment.clone());
match deploy_environment(&context, env_action, SCW_TEST_ZONE) {
match environment.deploy_environment(Kind::Scw, &context, &env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_deletion, env_action_delete, SCW_TEST_ZONE) {
match delete_env.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),

View File

@@ -1,168 +1,35 @@
extern crate test_utilities;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION};
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, init, FuncTestsSecrets};
use ::function_name::named;
use tracing::{span, Level};
use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode;
use qovery_engine::cloud_provider::scaleway::application::Zone;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::transaction::TransactionResult;
use qovery_engine::cloud_provider::Kind;
use test_utilities::common::{cluster_test, ClusterTestType};
use test_utilities::scaleway::SCW_KUBERNETES_VERSION;
#[allow(dead_code)]
fn create_upgrade_and_destroy_kapsule_cluster(
#[cfg(feature = "test-scw-infra")]
fn create_and_destroy_kapsule_cluster(
zone: Zone,
secrets: FuncTestsSecrets,
boot_version: &str,
_upgrade_to_version: &str,
test_type: ClusterTestType,
major_boot_version: u8,
minor_boot_version: u8,
test_name: &str,
vpc_network_mode: Option<VpcQoveryNetworkMode>,
) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::scaleway::docker_scw_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let scw_cluster = test_utilities::scaleway::cloud_provider_scaleway(&context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let cluster_id = generate_cluster_id(zone.as_str());
let kubernetes = Kapsule::new(
context,
cluster_id.clone(),
uuid::Uuid::new_v4(),
cluster_id,
boot_version.to_string(),
zone,
&scw_cluster,
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
cluster_test(
test_name,
Kind::Scw,
zone.as_str(),
secrets,
test_type,
major_boot_version,
minor_boot_version,
vpc_network_mode,
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Upgrade
// TODO(benjaminch): To be added
//let kubernetes = ...
// if let Err(err) = tx.create_kubernetes(&kubernetes) {
// panic!("{:?}", err)
// }
// let _ = match tx.commit() {
// TransactionResult::Ok => assert!(true),
// TransactionResult::Rollback(_) => assert!(false),
// TransactionResult::UnrecoverableError(_, _) => assert!(false),
// };
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
});
}
#[allow(dead_code)]
fn create_and_destroy_kapsule_cluster(zone: Zone, secrets: FuncTestsSecrets, test_infra_pause: bool, test_name: &str) {
engine_run_test(|| {
init();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let engine = test_utilities::scaleway::docker_scw_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let scw_cluster = test_utilities::scaleway::cloud_provider_scaleway(&context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let cluster_id = generate_cluster_id(zone.as_str());
let kubernetes = Kapsule::new(
context,
cluster_id.clone(),
uuid::Uuid::new_v4(),
cluster_id,
SCW_KUBERNETES_VERSION.to_string(),
zone,
&scw_cluster,
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
if test_infra_pause {
// Pause
if let Err(err) = tx.pause_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Resume
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
let _ = match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
}
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
match tx.commit() {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
});
})
}
#[cfg(feature = "test-scw-infra")]
@@ -172,7 +39,15 @@ fn create_and_destroy_kapsule_cluster(zone: Zone, secrets: FuncTestsSecrets, tes
fn create_and_destroy_kapsule_cluster_par_1() {
let zone = Zone::Paris1;
let secrets = FuncTestsSecrets::new();
create_and_destroy_kapsule_cluster(zone, secrets, false, function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::Classic,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
#[cfg(feature = "test-scw-infra")]
@@ -181,7 +56,15 @@ fn create_and_destroy_kapsule_cluster_par_1() {
fn create_and_destroy_kapsule_cluster_par_2() {
let zone = Zone::Paris2;
let secrets = FuncTestsSecrets::new();
create_and_destroy_kapsule_cluster(zone, secrets, false, function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::Classic,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
#[cfg(feature = "test-scw-infra")]
@@ -191,7 +74,15 @@ fn create_and_destroy_kapsule_cluster_par_2() {
fn create_and_destroy_kapsule_cluster_ams_1() {
let zone = Zone::Amsterdam1;
let secrets = FuncTestsSecrets::new();
create_and_destroy_kapsule_cluster(zone, secrets, false, function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::Classic,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
#[cfg(feature = "test-scw-infra")]
@@ -200,45 +91,89 @@ fn create_and_destroy_kapsule_cluster_ams_1() {
fn create_and_destroy_kapsule_cluster_war_1() {
let zone = Zone::Warsaw1;
let secrets = FuncTestsSecrets::new();
create_and_destroy_kapsule_cluster(zone, secrets, false, function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::Classic,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
// only enable this test manually when we want to perform and validate upgrade process
#[cfg(feature = "test-scw-infra")]
#[test]
#[ignore]
#[named]
#[ignore]
fn create_upgrade_and_destroy_kapsule_cluster_in_par_1() {
let zone = Zone::Paris1;
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_kapsule_cluster(zone, secrets, "1.18", "1.19", function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::WithUpgrade,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
// only enable this test manually when we want to perform and validate upgrade process
#[cfg(feature = "test-scw-infra")]
#[test]
#[ignore]
#[named]
#[ignore]
fn create_upgrade_and_destroy_kapsule_cluster_in_par_2() {
let zone = Zone::Paris2;
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_kapsule_cluster(zone, secrets, "1.18", "1.19", function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::WithUpgrade,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
// only enable this test manually when we want to perform and validate upgrade process
#[cfg(feature = "test-scw-infra")]
#[test]
#[ignore]
#[named]
#[ignore]
fn create_upgrade_and_destroy_kapsule_cluster_in_ams_1() {
let zone = Zone::Amsterdam1;
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_kapsule_cluster(zone, secrets, "1.18", "1.19", function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::WithUpgrade,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}
// only enable this test manually when we want to perform and validate upgrade process
#[cfg(feature = "test-scw-infra")]
#[test]
#[ignore]
#[named]
#[ignore]
fn create_upgrade_and_destroy_kapsule_cluster_in_war_1() {
let zone = Zone::Warsaw1;
let secrets = FuncTestsSecrets::new();
create_upgrade_and_destroy_kapsule_cluster(zone, secrets, "1.18", "1.19", function_name!());
create_and_destroy_kapsule_cluster(
zone,
secrets,
ClusterTestType::WithUpgrade,
SCW_KUBERNETES_MAJOR_VERSION,
SCW_KUBERNETES_MINOR_VERSION,
function_name!(),
None,
);
}

View File

@@ -5,7 +5,9 @@ use self::test_utilities::utilities::{context, engine_run_test, init, FuncTestsS
use ::function_name::named;
use tracing::{span, Level};
use self::test_utilities::common::Cluster;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::transaction::TransactionResult;
// Warning: This test shouldn't be ran by CI
@@ -26,12 +28,12 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() {
let _enter = span.enter();
let context = context();
let engine = test_utilities::scaleway::docker_scw_cr_engine(&context);
let engine = Scaleway::docker_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let scw_cluster = test_utilities::scaleway::cloud_provider_scaleway(&context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let scw_cluster = Scaleway::cloud_provider(&context);
let nodes = Scaleway::kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = Kapsule::new(
@@ -41,10 +43,10 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() {
test_utilities::scaleway::SCW_KUBE_TEST_CLUSTER_NAME.to_string(),
test_utilities::scaleway::SCW_KUBERNETES_VERSION.to_string(),
test_utilities::scaleway::SCW_TEST_ZONE,
&scw_cluster,
scw_cluster.as_ref(),
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
Scaleway::kubernetes_cluster_options(secrets, None),
)
.unwrap();
@@ -80,12 +82,12 @@ fn destroy_scaleway_kubernetes_kapsule_test_cluster() {
let _enter = span.enter();
let context = context();
let engine = test_utilities::scaleway::docker_scw_cr_engine(&context);
let engine = Scaleway::docker_cr_engine(&context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let scw_cluster = test_utilities::scaleway::cloud_provider_scaleway(&context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let scw_cluster = Scaleway::cloud_provider(&context);
let nodes = Scaleway::kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = Kapsule::new(
@@ -95,10 +97,10 @@ fn destroy_scaleway_kubernetes_kapsule_test_cluster() {
test_utilities::scaleway::SCW_KUBE_TEST_CLUSTER_NAME.to_string(),
test_utilities::scaleway::SCW_KUBERNETES_VERSION.to_string(),
test_utilities::scaleway::SCW_TEST_ZONE,
&scw_cluster,
scw_cluster.as_ref(),
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
Scaleway::kubernetes_cluster_options(secrets, None),
)
.unwrap();