From 8d8e9f0642e83873f4127225a814f7027109a2fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 8 Mar 2022 16:28:15 +0100 Subject: [PATCH 01/85] Build: Properly propagate cancel error when killed (#634) --- src/build_platform/local_docker.rs | 3 +++ src/transaction.rs | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 168d565b..f341680d 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,6 +6,7 @@ use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, CacheResult, Credentials, Image, Kind}; +use crate::cmd::command::CommandError::Killed; use crate::cmd::command::QoveryCommand; use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; @@ -186,6 +187,7 @@ impl LocalDocker { match exit_status { Ok(_) => Ok(BuildResult { build }), + Err(Killed(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), Err(err) => Err(EngineError::new_docker_cannot_build_container_image( self.get_event_details(), self.name_with_id(), @@ -343,6 +345,7 @@ impl LocalDocker { match exit_status { Ok(_) => Ok(BuildResult { build }), + Err(Killed(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), Err(err) => { let error = EngineError::new_buildpack_cannot_build_container_image( self.get_event_details(), diff --git a/src/transaction.rs b/src/transaction.rs index 8045a5e8..3f69c2dc 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -353,6 +353,10 @@ impl<'a> Transaction<'a> { }; } Step::BuildEnvironment(environment_action, option) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // build applications let target_environment = match 
environment_action { EnvironmentAction::Environment(te) => te, @@ -404,6 +408,10 @@ impl<'a> Transaction<'a> { applications_by_environment.insert(target_environment, applications); } Step::DeployEnvironment(environment_action) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // deploy complete environment match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { self.engine.kubernetes().deploy_environment(qe_env) @@ -416,6 +424,10 @@ impl<'a> Transaction<'a> { }; } Step::PauseEnvironment(environment_action) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // pause complete environment match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { self.engine.kubernetes().pause_environment(qe_env) @@ -428,6 +440,10 @@ impl<'a> Transaction<'a> { }; } Step::DeleteEnvironment(environment_action) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // delete complete environment match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { self.engine.kubernetes().delete_environment(qe_env) From cf6060ff13b10c0c1e4fd9d89b57622cff446942 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 8 Mar 2022 16:40:50 +0100 Subject: [PATCH 02/85] Fix compilation --- src/build_platform/local_docker.rs | 76 +++++++++++++++--------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index f341680d..179426a9 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -1,3 +1,4 @@ +use std::io::{Error, ErrorKind}; use std::path::Path; use std::{env, fs}; @@ -6,6 +7,7 @@ use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, CacheResult, Credentials, Image, Kind}; +use 
crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::QoveryCommand; use crate::errors::{CommandError, EngineError, Tag}; @@ -209,8 +211,9 @@ impl LocalDocker { let args = self.context.docker_build_options(); - let mut exit_status: Result<(), CommandError> = - Err(CommandError::new_from_safe_message("No builder names".to_string())); + let mut exit_status: Result<(), command::CommandError> = Err(command::CommandError::ExecutionError( + Error::new(ErrorKind::InvalidData, "No builder names".to_string()), + )); for builder_name in BUILDPACKS_BUILDERS.iter() { let mut buildpacks_args = if !use_build_cache { @@ -297,45 +300,40 @@ impl LocalDocker { // buildpacks build let mut cmd = QoveryCommand::new("pack", &buildpacks_args, &self.get_docker_host_envs()); - exit_status = cmd - .exec_with_abort( - Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), - |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); + exit_status = cmd.exec_with_abort( + Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), + |line| { + self.logger.log( + LogLevel::Info, + EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), + ); - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Info, - Some(line), - self.context.execution_id(), - )); - }, - |line| { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new_from_safe(line.to_string()), - ), - ); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { + id: build.image.application_id.clone(), + }, + ProgressLevel::Info, + Some(line), + self.context.execution_id(), + )); + }, + |line| { + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), + ); - 
lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Warn, - Some(line), - self.context.execution_id(), - )); - }, - is_task_canceled, - ) - .map_err(|err| CommandError::new(format!("{:?}", err), None)); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { + id: build.image.application_id.clone(), + }, + ProgressLevel::Warn, + Some(line), + self.context.execution_id(), + )); + }, + is_task_canceled, + ); if exit_status.is_ok() { // quit now if the builder successfully build the app From 041e996a6b3ab91f3321b9f593b84c2ccb45a0fb Mon Sep 17 00:00:00 2001 From: Melvin Zottola <37779145+mzottola@users.noreply.github.com> Date: Wed, 9 Mar 2022 08:24:58 +0100 Subject: [PATCH 03/85] Add tag 'latest' for buildpack images as well (#635) --- src/build_platform/local_docker.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 179426a9..718f6bd2 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -208,6 +208,7 @@ impl LocalDocker { is_task_canceled: &dyn Fn() -> bool, ) -> Result { let name_with_tag = build.image.name_with_tag(); + let name_with_latest_tag = build.image.name_with_latest_tag(); let args = self.context.docker_build_options(); @@ -222,6 +223,9 @@ impl LocalDocker { vec!["build", name_with_tag.as_str()] }; + // always add 'latest' tag + buildpacks_args.extend(vec!["-t", name_with_latest_tag.as_str()]); + for v in args.iter() { for s in v.iter() { buildpacks_args.push(String::as_str(s)); From a8529b3a572718ce4d16d2ef1b52ebec40d461fe Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Thu, 10 Mar 2022 10:57:35 +0100 Subject: [PATCH 04/85] fix: disabling some tests (#636) --- tests/aws/aws_databases.rs | 12 ++++++++++++ tests/digitalocean/do_databases.rs | 16 ++++++++-------- tests/scaleway/scw_databases.rs | 8 ++++++++ 3 files changed, 28 
insertions(+), 8 deletions(-) diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index a3ffbfba..13b53f17 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -394,6 +394,7 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -408,6 +409,7 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } @@ -422,6 +424,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -546,6 +549,7 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } @@ -560,6 +564,7 @@ fn private_mongodb_v4_0_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } @@ -574,6 +579,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -603,6 +609,7 @@ fn private_mongodb_v3_6_deploy_a_working_prod_environment() { 
#[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_prod_environment() { test_mongodb_configuration("3.6", function_name!(), MANAGED, true); } @@ -617,6 +624,7 @@ fn private_mongodb_v4_0_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_prod_environment() { test_mongodb_configuration("4.0", function_name!(), MANAGED, true); } @@ -670,6 +678,7 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } @@ -766,6 +775,7 @@ fn private_redis_v5_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } @@ -795,6 +805,7 @@ fn private_redis_v5_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_prod_environment() { test_redis_configuration("5", function_name!(), MANAGED, true); } @@ -809,6 +820,7 @@ fn private_redis_v6_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_redis_v6_deploy_a_working_prod_environment() { test_redis_configuration("6", function_name!(), MANAGED, true); } diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index ca035c44..478b1c1b 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -429,6 +429,7 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[ignore] #[named] #[test] +#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { 
test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -445,6 +446,7 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { #[ignore] #[named] #[test] +#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } @@ -459,6 +461,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-do-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -523,15 +526,14 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mongodb_v4_0_deploy_a_working_dev_environment() { @@ -539,15 +541,14 @@ fn private_mongodb_v4_0_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mongodb_v4_2_deploy_a_working_dev_environment() { @@ -557,6 +558,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-do-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -621,15 +623,14 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { 
test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mysql_v8_deploy_a_working_dev_environment() { @@ -691,15 +692,14 @@ fn private_redis_v5_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_redis_v6_deploy_a_working_dev_environment() { diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 1fdc5b5b..d6a5ac36 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -434,6 +434,7 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -448,6 +449,7 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } @@ -462,6 +464,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -594,6 +597,7 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } @@ -608,6 +612,7 @@ fn 
private_mongodb_v4_0_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } @@ -622,6 +627,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -689,6 +695,7 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } @@ -773,6 +780,7 @@ fn private_redis_v5_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } From be5e9b450a44ff8b3d5d7c7636a3c1245784b66e Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Thu, 10 Mar 2022 14:19:48 +0100 Subject: [PATCH 05/85] fix: handle charts crds (#630) --- src/cloud_provider/aws/kubernetes/helm_charts.rs | 4 ++++ src/cloud_provider/digitalocean/kubernetes/helm_charts.rs | 4 ++++ src/cloud_provider/scaleway/kubernetes/helm_charts.rs | 4 ++++ test_utilities/src/utilities.rs | 2 +- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 78279ba8..e81f342f 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -478,6 +478,10 @@ pub fn aws_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: 
"installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 88c6e0f1..0078579e 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -328,6 +328,10 @@ pub fn do_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 98bb8f3e..2900525e 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -302,6 +302,10 @@ pub fn scw_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index c714efe0..ca06c1d8 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -90,7 +90,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { disable_pleco: Some(true), }; - let enabled_features = vec![Features::LogsHistory, Features::MetricsHistory]; + let enabled_features = vec![Features::LogsHistory]; Context::new( organization_id, From f6944c7728e9e17747a97b6df9a11e9496487c10 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 11 Mar 2022 13:18:56 +0100 Subject: [PATCH 
06/85] tests: aws add cluster pause tests (#625) --- tests/aws/aws_kubernetes.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index f5bd8a4c..da79ca77 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -108,6 +108,25 @@ fn create_and_destroy_eks_cluster_in_us_east_2() { ); } +#[cfg(feature = "test-aws-infra")] +#[named] +#[test] +fn create_pause_and_destroy_eks_cluster_in_us_east_2() { + let secrets = FuncTestsSecrets::new(); + let region = "us-east-2".to_string(); + let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); + create_and_destroy_eks_cluster( + region, + AwsRegion::get_zones(&aws_region), + secrets, + ClusterTestType::WithPause, + AWS_KUBERNETES_MAJOR_VERSION, + AWS_KUBERNETES_MINOR_VERSION, + WithoutNatGateways, + function_name!(), + ); +} + // only enable this test manually when we want to perform and validate upgrade process #[cfg(feature = "test-aws-infra")] #[named] From 159b2e0cfc84ae2a56ed7ff0ad3abf494db8088c Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 11 Mar 2022 13:19:31 +0100 Subject: [PATCH 07/85] fix: use cluster long id from input (#631) --- src/cloud_provider/aws/kubernetes/mod.rs | 6 +- .../digitalocean/kubernetes/mod.rs | 6 +- src/cloud_provider/kubernetes.rs | 6 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 6 +- src/errors/mod.rs | 6 +- src/logger.rs | 8 +- src/models.rs | 90 ++++++++++++++++--- 7 files changed, 98 insertions(+), 30 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index d63d6a2c..11e667d7 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -154,9 +154,9 @@ impl EKS { ) -> Result { let event_details = EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - 
QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(region.to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id.to_string(), name.to_string()), diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 86985aa3..b9364d63 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -124,9 +124,9 @@ impl DOKS { let err = EngineError::new_unsupported_instance_type( EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(region.to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id.to_string(), name.to_string()), diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 7cdadc7f..db0390d4 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -2092,9 +2092,9 @@ mod tests { let event_details = EventDetails::new( Some(Aws), - QoveryIdentifier::new(organization_id.to_string()), - QoveryIdentifier::new(cluster_id.to_string()), - QoveryIdentifier::new(execution_id.to_string()), + QoveryIdentifier::new_from_long_id(organization_id.to_string()), + QoveryIdentifier::new_from_long_id(cluster_id.to_string()), + 
QoveryIdentifier::new_from_long_id(execution_id.to_string()), Some("region_fake".to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(cluster_id.to_string(), format!("{}-name", cluster_id)), diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 44c8b8f5..a708edae 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -162,9 +162,9 @@ impl Kapsule { let err = EngineError::new_unsupported_instance_type( EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(zone.region_str().to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id, name), diff --git a/src/errors/mod.rs b/src/errors/mod.rs index b3b1a6a8..8e05135b 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -342,9 +342,9 @@ impl EngineError { tag: Tag::Unknown, event_details: EventDetails::new( None, - QoveryIdentifier::new("".to_string()), - QoveryIdentifier::new("".to_string()), - QoveryIdentifier::new(e.execution_id.to_string()), + QoveryIdentifier::new_from_long_id("".to_string()), + QoveryIdentifier::new_from_long_id("".to_string()), + QoveryIdentifier::new_from_long_id(e.execution_id.to_string()), None, Stage::General(GeneralStep::UnderMigration), match e.scope { diff --git a/src/logger.rs b/src/logger.rs index f5a1a2bb..1af91fec 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -100,11 +100,11 @@ mod tests { #[test] fn test_log() { // setup: - let orga_id = 
QoveryIdentifier::new(Uuid::new_v4().to_string()); - let cluster_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); + let orga_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); + let cluster_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let cluster_name = format!("qovery-{}", cluster_id); - let execution_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); - let app_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); + let execution_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); + let app_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let app_name = format!("simple-app-{}", app_id); let qovery_message = "Qovery message"; let user_message = "User message"; diff --git a/src/models.rs b/src/models.rs index fb3d812b..8a88f4bb 100644 --- a/src/models.rs +++ b/src/models.rs @@ -28,27 +28,34 @@ use crate::utilities::get_image_tag; #[derive(Clone, Debug)] pub struct QoveryIdentifier { - raw: String, + raw_long_id: String, short: String, } impl QoveryIdentifier { - pub fn new(raw: String) -> Self { + pub fn new(raw_long_id: String, raw_short_id: String) -> Self { QoveryIdentifier { - raw: raw.to_string(), - short: QoveryIdentifier::extract_short(raw.as_str()), + raw_long_id, + short: raw_short_id, } } + pub fn new_from_long_id(raw_long_id: String) -> Self { + QoveryIdentifier::new( + raw_long_id.to_string(), + QoveryIdentifier::extract_short(raw_long_id.as_str()), + ) + } + pub fn new_random() -> Self { - Self::new(uuid::Uuid::new_v4().to_string()) + Self::new_from_long_id(uuid::Uuid::new_v4().to_string()) } fn extract_short(raw: &str) -> String { - let max_execution_id_chars: usize = 7; - match raw.char_indices().nth(max_execution_id_chars) { + let max_execution_id_chars: usize = 8; + match raw.char_indices().nth(max_execution_id_chars - 1) { None => raw.to_string(), - Some((idx, _)) => raw[..idx].to_string(), + Some((_, _)) => raw[..max_execution_id_chars].to_string(), } } @@ 
-59,13 +66,13 @@ impl QoveryIdentifier { impl From for QoveryIdentifier { fn from(s: String) -> Self { - QoveryIdentifier::new(s) + QoveryIdentifier::new_from_long_id(s) } } impl Display for QoveryIdentifier { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(self.raw.as_str()) + f.write_str(self.raw_long_id.as_str()) } } @@ -1410,7 +1417,7 @@ impl ToTerraformString for Ipv4Addr { #[cfg(test)] mod tests { - use crate::models::Domain; + use crate::models::{Domain, QoveryIdentifier}; #[test] fn test_domain_new() { @@ -1488,4 +1495,65 @@ mod tests { ); } } + + #[test] + fn test_qovery_identifier_new_from_long_id() { + struct TestCase<'a> { + input: String, + expected_long_id_output: String, + expected_short_output: String, + description: &'a str, + } + + // setup: + let test_cases: Vec = vec![ + TestCase { + input: "".to_string(), + expected_long_id_output: "".to_string(), + expected_short_output: "".to_string(), + description: "empty raw long ID input", + }, + TestCase { + input: "2a365285-992f-4285-ab96-c55ac81ecde9".to_string(), + expected_long_id_output: "2a365285-992f-4285-ab96-c55ac81ecde9".to_string(), + expected_short_output: "2a365285".to_string(), + description: "proper Uuid input", + }, + TestCase { + input: "2a365285".to_string(), + expected_long_id_output: "2a365285".to_string(), + expected_short_output: "2a365285".to_string(), + description: "non standard Uuid input, length 8", + }, + TestCase { + input: "2a365285hebnrfvuebr".to_string(), + expected_long_id_output: "2a365285hebnrfvuebr".to_string(), + expected_short_output: "2a365285".to_string(), + description: "non standard Uuid input, length longer than expected short (length 8)", + }, + TestCase { + input: "2a365".to_string(), + expected_long_id_output: "2a365".to_string(), + expected_short_output: "2a365".to_string(), + description: "non standard Uuid input, length shorter than expected short (length 8)", + }, + ]; + + for tc in test_cases { + // execute: + let result = 
QoveryIdentifier::new_from_long_id(tc.input.clone()); + + // verify: + assert_eq!( + tc.expected_long_id_output, result.raw_long_id, + "case {} : '{}'", + tc.description, tc.input + ); + assert_eq!( + tc.expected_short_output, result.short, + "case {} : '{}'", + tc.description, tc.input + ); + } + } } From 1dca33108562ac6873036b2bdcc8c0764d949722 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 11 Mar 2022 13:21:21 +0100 Subject: [PATCH 08/85] feat: migrate docker to new logging (#633) Ticket: ENG-1135 --- src/build_platform/local_docker.rs | 35 +++++++++++++++++++++++++++--- src/errors/io.rs | 2 ++ src/errors/mod.rs | 22 +++++++++++++++++++ 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 718f6bd2..591f58ac 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -415,7 +415,15 @@ impl BuildPlatform for LocalDocker { } fn has_cache(&self, build: &Build) -> Result { - info!("LocalDocker.has_cache() called for {}", self.name()); + let event_details = self.get_event_details(); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("LocalDocker.has_cache() called".to_string()), + ), + ); // Check if a local cache layers for the container image exists. 
let repository_root_path = self.get_repository_build_root_path(&build)?; @@ -430,13 +438,34 @@ impl BuildPlatform for LocalDocker { }; // check if local layers exist - let mut cmd = QoveryCommand::new("docker", &["images", "-q", parent_build.image.name.as_str()], &[]); + let cmd_bin = "docker"; + let image_name = parent_build.image.name.clone(); + let cmd_args = vec!["images", "-q", &image_name]; + let mut cmd = QoveryCommand::new(cmd_bin, &cmd_args.clone(), &[]); let mut result = CacheResult::Miss(parent_build); let _ = cmd.exec_with_timeout( Duration::minutes(1), // `docker images` command can be slow with tons of images - it's probably not indexed |_| result = CacheResult::Hit, // if a line is returned, then the image is locally present - |r_err| error!("Error executing docker command {}", r_err), + |r_err| { + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_docker_cannot_list_images( + event_details.clone(), + CommandError::new_from_command_line( + "Cannot list docker images".to_string(), + cmd_bin.to_string(), + cmd_args.clone().into_iter().map(|v| v.to_string()).collect(), + vec![], + None, + Some(r_err.to_string()), + ), + ), + None, + ), + ) + }, ); Ok(result) diff --git a/src/errors/io.rs b/src/errors/io.rs index 0cebdd46..8577a365 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -98,6 +98,7 @@ pub enum Tag { BuilderCloningRepositoryError, DockerPushImageError, DockerPullImageError, + BuilderDockerCannotListImages, } impl From for Tag { @@ -186,6 +187,7 @@ impl From for Tag { errors::Tag::BuilderCloningRepositoryError => Tag::BuilderCloningRepositoryError, errors::Tag::DockerPushImageError => Tag::DockerPushImageError, errors::Tag::DockerPullImageError => Tag::DockerPullImageError, + errors::Tag::BuilderDockerCannotListImages => Tag::BuilderDockerCannotListImages, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 8e05135b..3cbc4d99 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -231,6 +231,8 @@ 
pub enum Tag { BuilderDockerCannotExtractEnvVarsFromDockerfile, /// BuilderDockerCannotBuildContainerImage: represents an error while trying to build Docker container image. BuilderDockerCannotBuildContainerImage, + /// BuilderDockerCannotListImages: represents an error while trying to list docker images. + BuilderDockerCannotListImages, /// BuilderBuildpackInvalidLanguageFormat: represents an error where buildback requested language has wrong format. BuilderBuildpackInvalidLanguageFormat, /// BuilderBuildpackCannotBuildContainerImage: represents an error while trying to build container image with Buildpack. @@ -2390,4 +2392,24 @@ impl EngineError { Some("It looks like there is something wrong in your Dockerfile. Try building the application locally with `docker build --no-cache`.".to_string()), ) } + + /// Creates new error when trying to list Docker images. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `raw_error`: Raw error message. + pub fn new_docker_cannot_list_images(event_details: EventDetails, raw_error: CommandError) -> EngineError { + let message = "Error while trying to list docker images."; + + EngineError::new( + event_details, + Tag::BuilderDockerCannotListImages, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } } From 32883b8ffd3c5b379ecc2e59cc914c50445f8a9f Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 11 Mar 2022 16:11:11 +0100 Subject: [PATCH 09/85] feat: migrate container registries to new logging (#638) Ticket: ENG-1136 --- src/cmd/command.rs | 13 + src/container_registry/docker.rs | 317 +++++++++++---- src/container_registry/docker_hub.rs | 113 ++++-- src/container_registry/docr.rs | 367 ++++++++++++------ src/container_registry/ecr.rs | 323 +++++++++------ src/container_registry/mod.rs | 18 +- .../scaleway_container_registry.rs | 275 ++++++++----- src/errors/io.rs | 18 + src/errors/mod.rs | 237 +++++++++++ src/transaction.rs | 28 +- 
test_utilities/src/aws.rs | 4 +- test_utilities/src/digitalocean.rs | 6 +- test_utilities/src/scaleway.rs | 6 +- 13 files changed, 1291 insertions(+), 434 deletions(-) diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 38508699..05cea017 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -32,6 +32,19 @@ pub enum CommandError { Killed(String), } +impl CommandError { + pub fn to_string(&self) -> String { + match self { + ExecutionError(err) => format!("Execution error: {}", err.to_string()), + ExitStatusError(exit_status) => { + format!("Execution error: exit status {}", exit_status.to_string()) + } + TimeoutError(msg) => format!("Execution error: timeout, {}", msg.to_string()), + Killed(msg) => format!("Execution error: killed, {}", msg.to_string()), + } + } +} + pub struct QoveryCommand { command: Command, } diff --git a/src/container_registry/docker.rs b/src/container_registry/docker.rs index 5cd30146..4eaa688d 100644 --- a/src/container_registry/docker.rs +++ b/src/container_registry/docker.rs @@ -2,7 +2,9 @@ use crate::build_platform::Image; use crate::cmd; use crate::cmd::command::QoveryCommand; use crate::container_registry::Kind; -use crate::error::{SimpleError, SimpleErrorKind}; +use crate::errors::CommandError; +use crate::events::{EngineEvent, EventDetails, EventMessage}; +use crate::logger::{LogLevel, Logger}; use chrono::Duration; use retry::delay::Fibonacci; use retry::Error::Operation; @@ -39,7 +41,9 @@ pub fn docker_manifest_inspect( image_name: String, image_tag: String, registry_url: String, -) -> Option { + event_details: EventDetails, + logger: &dyn Logger, +) -> Result { let image_with_tag = format!("{}:{}", image_name, image_tag); let registry_provider = match container_registry_kind { Kind::DockerHub => "DockerHub", @@ -63,26 +67,44 @@ pub fn docker_manifest_inspect( Ok(_) => { let joined = raw_output.join(""); match serde_json::from_str(&joined) { - Ok(extracted_manifest) => Some(extracted_manifest), + Ok(extracted_manifest) 
=> Ok(extracted_manifest), Err(e) => { - error!( - "error while trying to deserialize manifest image manifest for image {} in {} ({}): {:?}", - image_with_tag, registry_provider, registry_url, e, + let error = CommandError::new( + e.to_string(), + Some(format!( + "Error while trying to deserialize manifest image manifest for image {} in {} ({}).", + image_with_tag, registry_provider, registry_url, + )), ); - None + + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::from(error.clone())), + ); + + Err(error) } } } Err(e) => { - error!( - "error while trying to inspect image manifest for image {} in {} ({}), command `{}`: {:?}", - image_with_tag, - registry_provider, - registry_url, - cmd::command::command_to_string(binary, &args, &envs), - e, + let error = CommandError::new( + format!( + "Command `{}`: {:?}", + cmd::command::command_to_string(binary, &args, &envs), + e + ), + Some(format!( + "Error while trying to inspect image manifest for image {} in {} ({}).", + image_with_tag, registry_provider, registry_url, + )), ); - None + + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::from(error.clone())), + ); + + Err(error) } }; } @@ -93,7 +115,9 @@ pub fn docker_login( registry_login: String, registry_pass: String, registry_url: String, -) -> Result<(), SimpleError> { + event_details: EventDetails, + logger: &dyn Logger, +) -> Result<(), CommandError> { let registry_provider = match container_registry_kind { Kind::DockerHub => "DockerHub", Kind::Ecr => "AWS ECR", @@ -115,16 +139,24 @@ pub fn docker_login( match cmd.exec() { Ok(_) => Ok(()), Err(e) => { - let error_message = format!( - "error while trying to login to registry {} {}, command `{}`: {:?}", - registry_provider, - registry_url, - cmd::command::command_to_string(binary, &args, &docker_envs), - e, + let err = CommandError::new( + format!( + "Command `{}`: {:?}", + cmd::command::command_to_string(binary, &args, 
&docker_envs), + e, + ), + Some(format!( + "Error while trying to login to registry {} {}.", + registry_provider, registry_url, + )), ); - error!("{}", error_message); - Err(SimpleError::new(SimpleErrorKind::Other, Some(error_message))) + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::from(err.clone())), + ); + + Err(err) } } } @@ -135,7 +167,9 @@ pub fn docker_tag_and_push_image( image: &Image, dest: String, dest_latest_tag: String, -) -> Result<(), SimpleError> { + event_details: EventDetails, + logger: &dyn Logger, +) -> Result<(), CommandError> { let image_with_tag = image.name_with_tag(); let registry_provider = match container_registry_kind { Kind::DockerHub => "DockerHub", @@ -144,50 +178,114 @@ pub fn docker_tag_and_push_image( Kind::ScalewayCr => "Scaleway Registry", }; - let mut cmd = QoveryCommand::new("docker", &vec!["tag", &image_with_tag, dest.as_str()], &docker_envs); + let binary = "docker"; + let args = vec!["tag", &image_with_tag, dest.as_str()]; + let mut cmd = QoveryCommand::new(binary, &args, &docker_envs); match retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { Ok(_) => OperationResult::Ok(()), Err(e) => { - info!("failed to tag image {}, retrying...", image_with_tag); + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!("Failed to tag image `{}`, retrying...", image_with_tag), + Some(format!( + "Command `{}`: {:?}", + cmd::command::command_to_string(binary, &args, &docker_envs), + e + )), + ), + ), + ); + OperationResult::Retry(e) } }) { Err(Operation { error, .. 
}) => { - return Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!("failed to tag image {}: {:?}", image_with_tag, error)), - )) + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::from(CommandError::new_from_legacy_command_error( + error, + Some(format!("Error while trying to tag docker image `{}`", image_with_tag)), + )), + ), + ); } - _ => {} + Err(retry::Error::Internal(msg)) => { + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::from(CommandError::new( + msg, + Some(format!("Error while trying to tag docker image `{}`", image_with_tag)), + )), + ), + ); + } + Ok(_) => {} } let mut cmd = QoveryCommand::new("docker", &vec!["push", dest.as_str()], &docker_envs); let _ = match retry::retry(Fibonacci::from_millis(5000).take(5), || { match cmd.exec_with_timeout( Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), + |line| { + logger.log( + LogLevel::Info, + EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), + ) + }, + |line| { + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), + ) + }, ) { Ok(_) => OperationResult::Ok(()), Err(e) => { - warn!( - "failed to push image {} on {}, {:?} retrying...", - image_with_tag, registry_provider, e + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Failed to push image `{}` on `{}`, retrying ...", + image_with_tag, registry_provider + ), + Some(format!("{:?}", e)), + ), + ), ); + OperationResult::Retry(e) } } }) { - Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "unknown error while trying to push image {} to {}. {:?}", - image_with_tag, registry_provider, e - )), + Err(Operation { error, .. 
}) => Err(CommandError::new_from_legacy_command_error( + error, + Some(format!("Failed to push docker image `{}`", image_with_tag)), + )), + Err(retry::Error::Internal(msg)) => Err(CommandError::new( + msg, + Some(format!("Failed to push docker image `{}`", image_with_tag)), )), _ => { - info!("image {} has successfully been pushed", image_with_tag); + logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Image {} has successfully been pushed on `{}`", + image_with_tag, registry_provider + )), + ), + ); + Ok(()) } }; @@ -201,14 +299,29 @@ pub fn docker_tag_and_push_image( match retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { Ok(_) => OperationResult::Ok(()), Err(e) => { - info!("failed to tag image {}, retrying...", image_with_latest_tag); + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!("Failed to tag image `{}`, retrying ...", image_with_latest_tag), + Some(format!("{:?}", e)), + ), + ), + ); OperationResult::Retry(e) } }) { Err(Operation { error, .. 
}) => { - return Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!("failed to tag image {}: {:?}", image_with_latest_tag, error)), + return Err(CommandError::new_from_legacy_command_error( + error, + Some(format!("Failed to tag docker image `{}`", image_with_tag)), + )) + } + Err(retry::Error::Internal(msg)) => { + return Err(CommandError::new( + msg, + Some(format!("Failed to tag docker image `{}`", image_with_tag)), )) } _ => {} @@ -218,29 +331,54 @@ pub fn docker_tag_and_push_image( match retry::retry(Fibonacci::from_millis(5000).take(5), || { match cmd.exec_with_timeout( Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), + |line| { + logger.log( + LogLevel::Info, + EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), + ) + }, + |line| { + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), + ) + }, ) { Ok(_) => OperationResult::Ok(()), Err(e) => { - warn!( - "failed to push image {} on {}, {:?} retrying...", - image_with_latest_tag, registry_provider, e + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Failed to push image {} on {}, retrying...", + image_with_tag, registry_provider + ), + Some(format!("{:?}", e)), + ), + ), ); OperationResult::Retry(e) } } }) { - Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, + Err(Operation { error, .. }) => Err(CommandError::new(error.to_string(), None)), + Err(e) => Err(CommandError::new( + format!("{:?}", e), Some(format!( - "unknown error while trying to push image {} to {}. 
{:?}", - image_with_latest_tag, registry_provider, e + "Unknown error while trying to push image {} to {}.", + image_with_tag, registry_provider, )), )), _ => { - info!("image {} has successfully been pushed", image_with_latest_tag); + logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("image {} has successfully been pushed", image_with_tag)), + ), + ); Ok(()) } } @@ -250,7 +388,9 @@ pub fn docker_pull_image( container_registry_kind: Kind, docker_envs: Vec<(&str, &str)>, dest: String, -) -> Result<(), SimpleError> { + event_details: EventDetails, + logger: &dyn Logger, +) -> Result<(), CommandError> { let registry_provider = match container_registry_kind { Kind::DockerHub => "DockerHub", Kind::Ecr => "AWS ECR", @@ -262,36 +402,59 @@ pub fn docker_pull_image( match retry::retry(Fibonacci::from_millis(5000).take(5), || { match cmd.exec_with_timeout( Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), + |line| { + logger.log( + LogLevel::Info, + EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), + ) + }, + |line| { + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), + ) + }, ) { Ok(_) => OperationResult::Ok(()), Err(e) => { - warn!( - "failed to pull image from {} registry {}, {:?} retrying...", - registry_provider, - dest.as_str(), - e, + logger.log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "failed to pull image from {} registry {}, retrying...", + registry_provider, + dest.as_str(), + ), + Some(format!("{:?}", e)), + ), + ), ); OperationResult::Retry(e) } } }) { - Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, + Err(Operation { error, .. 
}) => Err(CommandError::new(error.to_string(), None)), + Err(e) => Err(CommandError::new( + format!("{:?}", e), Some(format!( - "unknown error while trying to pull image {} from {} registry. {:?}", + "Unknown error while trying to pull image {} from {} registry.", dest.as_str(), registry_provider, - e, )), )), _ => { - info!( - "image {} has successfully been pulled from {} registry", - dest.as_str(), - registry_provider, + logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Image {} has successfully been pulled from {} registry", + dest.as_str(), + registry_provider, + )), + ), ); Ok(()) } diff --git a/src/container_registry/docker_hub.rs b/src/container_registry/docker_hub.rs index db4e2131..63fb48b7 100644 --- a/src/container_registry/docker_hub.rs +++ b/src/container_registry/docker_hub.rs @@ -1,14 +1,15 @@ extern crate reqwest; use reqwest::StatusCode; +use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult}; -use crate::error::EngineErrorCause; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; +use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; +use crate::errors::{CommandError, EngineError}; +use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; +use crate::logger::{LogLevel, Logger}; use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; @@ -20,10 +21,11 @@ pub struct DockerHub { login: String, password: String, listeners: Listeners, + logger: Box, } impl DockerHub { - pub fn new(context: Context, id: &str, name: &str, login: &str, password: &str) -> Self { + pub fn new(context: Context, id: &str, name: 
&str, login: &str, password: &str, logger: Box) -> Self { DockerHub { context, id: id.to_string(), @@ -31,10 +33,13 @@ impl DockerHub { login: login.to_string(), password: password.to_string(), listeners: vec![], + logger, } } pub fn exec_docker_login(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(); + let envs = match self.context.docker_tcp_socket() { Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], None => vec![], @@ -48,27 +53,25 @@ impl DockerHub { match cmd.exec() { Ok(_) => Ok(()), - Err(_) => Err(self.engine_error( - EngineErrorCause::User( - "Your DockerHub account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to DockerHub {}", self.name_with_id()), + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, )), } } fn pull_image(&self, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), vec![], dest.clone()) { + let event_details = self.get_event_details(); + match docker_pull_image(self.kind(), vec![], dest.clone(), event_details.clone(), self.logger()) { Ok(_) => { let mut image = image.clone(); image.registry_url = Some(dest); Ok(PullResult::Some(image)) } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), + Err(e) => Err(EngineError::new_docker_pull_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, )), } } @@ -97,7 +100,7 @@ impl ContainerRegistry for DockerHub { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn is_valid(&self) -> Result<(), EngineError> { Ok(()) } @@ -118,6 +121,7 @@ impl ContainerRegistry for DockerHub { } fn does_image_exists(&self, image: &Image) -> bool { + let event_details = self.get_event_details(); use reqwest::blocking::Client; let 
client = Client::new(); let path = format!( @@ -133,13 +137,27 @@ impl ContainerRegistry for DockerHub { match res { Ok(out) => matches!(out.status(), StatusCode::OK), Err(e) => { - error!("While trying to retrieve if DockerHub repository exist {:?}", e); + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_container_registry_repository_doesnt_exist( + event_details.clone(), + image.name.to_string(), + Some(CommandError::new( + e.to_string(), + Some("Error while trying to retrieve if DockerHub repository exist.".to_string()), + )), + ), + None, + ), + ); false } } } fn pull(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); if !self.does_image_exists(image) { @@ -148,7 +166,14 @@ impl ContainerRegistry for DockerHub { image, self.name() ); - info!("{}", info_message.as_str()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -163,7 +188,14 @@ impl ContainerRegistry for DockerHub { } let info_message = format!("pull image {:?} from DockerHub {} repository", image, self.name()); - info!("{}", info_message.as_str()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -183,6 +215,8 @@ impl ContainerRegistry for DockerHub { } fn push(&self, image: &Image, force_push: bool) -> Result { + let event_details = self.get_event_details(); + let _ = self.exec_docker_login()?; let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str()); @@ -196,7 +230,13 @@ impl ContainerRegistry for DockerHub { self.name() ); - info!("{}", info_message.as_str()); + 
self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -219,6 +259,14 @@ impl ContainerRegistry for DockerHub { self.name() ); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); + listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: image.application_id.clone(), @@ -229,16 +277,25 @@ impl ContainerRegistry for DockerHub { )); let dest_latest_tag = format!("{}/{}:latest", self.login.as_str(), image.name); - match docker_tag_and_push_image(self.kind(), vec![], &image, dest.clone(), dest_latest_tag) { + match docker_tag_and_push_image( + self.kind(), + vec![], + &image, + dest.clone(), + dest_latest_tag, + event_details.clone(), + self.logger(), + ) { Ok(_) => { let mut image = image.clone(); image.registry_url = Some(dest); Ok(PushResult { image }) } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), + Err(e) => Err(EngineError::new_docker_push_image_error( + event_details.clone(), + image.name.to_string(), + dest.to_string(), + e, )), } } @@ -246,6 +303,10 @@ impl ContainerRegistry for DockerHub { fn push_error(&self, _image: &Image) -> Result { unimplemented!() } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } } impl Listen for DockerHub { diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index eeabe76d..6b71b36b 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -2,14 +2,15 @@ extern crate digitalocean; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; use 
crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult}; -use crate::error::{cast_simple_error_to_engine_error, EngineErrorCause, SimpleError, SimpleErrorKind}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; +use crate::errors::CommandError; +use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; +use crate::logger::{LogLevel, Logger}; use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; @@ -30,34 +31,36 @@ pub struct DOCR { pub api_key: String, pub id: String, pub listeners: Listeners, + pub logger: Box, } impl DOCR { - pub fn new(context: Context, id: &str, name: &str, api_key: &str) -> Self { + pub fn new(context: Context, id: &str, name: &str, api_key: &str, logger: Box) -> Self { DOCR { context, name: name.into(), api_key: api_key.into(), id: id.into(), listeners: vec![], + logger, } } fn get_registry_name(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); + let registry_name = match image.registry_name.as_ref() { // DOCR does not support upper cases Some(registry_name) => registry_name.to_lowercase(), - None => cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - get_current_registry_name(self.api_key.as_str()), - )?, + None => get_current_registry_name(self.api_key.as_str(), event_details, self.logger())?, }; Ok(registry_name) } fn create_repository(&self, image: &Image) -> Result<(), EngineError> { + let event_details = self.get_event_details(); + let registry_name = match image.registry_name.as_ref() { // DOCR does not support upper cases Some(registry_name) => registry_name.to_lowercase(), @@ -85,50 +88,76 @@ impl DOCR { StatusCode::OK => Ok(()), StatusCode::CREATED => Ok(()), status => { - warn!("status 
from DO registry API {}", status); - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "Bad status code : {} returned by the DO registry API for creating DO CR {}", + return Err(EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + self.name_with_id(), + registry_name.to_string(), + CommandError::new_from_safe_message(format!( + "Bad status code: `{}` returned by the DO registry API for creating DOCR `{}`.", status, registry_name.as_str(), - ), + )), )); } }, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("failed to create DOCR repository {} : {:?}", registry_name.as_str(), e,), + return Err(EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + self.name_with_id(), + registry_name.to_string(), + CommandError::new( + e.to_string(), + Some(format!( + "Failed to create DOCR repository `{}`.", + registry_name.as_str(), + )), + ), )); } } } Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("Unable to initialize DO Registry {} : {:?}", registry_name.as_str(), e,), + return Err(EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + self.name_with_id(), + registry_name.to_string(), + CommandError::new( + e.to_string(), + Some(format!( + "Failed to create DOCR repository `{}`.", + registry_name.as_str(), + )), + ), )); } } } fn push_image(&self, registry_name: String, dest: String, image: &Image) -> Result { + let event_details = self.get_event_details(); + let dest_latest_tag = format!( "registry.digitalocean.com/{}/{}:latest", registry_name.as_str(), image.name ); - let _ = match docker_tag_and_push_image(self.kind(), vec![], &image, dest.clone(), dest_latest_tag) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )); - } - }; + + if let Err(e) 
= docker_tag_and_push_image( + self.kind(), + vec![], + image, + dest.clone(), + dest_latest_tag.clone(), + event_details.clone(), + self.logger(), + ) { + return Err(EngineError::new_docker_push_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, + )); + } let mut image = image.clone(); image.registry_name = Some(registry_name.clone()); @@ -140,15 +169,23 @@ impl DOCR { match self.does_image_exists(&image) { true => OperationResult::Ok(&image), false => { - warn!("image is not yet available on Digital Ocean Registry, retrying in a few seconds..."); + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning( + self.get_event_details(), + EventMessage::new_from_safe( + "Image is not yet available on DOCR, retrying in a few seconds...".to_string(), + ), + ), + ); OperationResult::Retry(()) } } }); - let image_not_reachable = Err(self.engine_error( - EngineErrorCause::Internal, - "image has been pushed on Digital Ocean Registry but is not yet available after 2min. 
Please try to redeploy in a few minutes".to_string(), + let image_not_reachable = Err(EngineError::new_container_registry_image_unreachable_after_push( + event_details.clone(), + image.name.to_string(), )); match result { Ok(_) => Ok(PushResult { image }), @@ -167,6 +204,8 @@ impl DOCR { } pub fn delete_repository(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(); + let headers = utilities::get_header_with_bearer(&self.api_key); let res = reqwest::blocking::Client::new() .delete(CR_API_PATH) @@ -177,26 +216,32 @@ impl DOCR { Ok(out) => match out.status() { StatusCode::NO_CONTENT => Ok(()), status => { - warn!("delete status from DO registry API {}", status); - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "Bad status code : {} returned by the DO registry API for deleting DOCR repository", + return Err(EngineError::new_container_registry_delete_repository_error( + event_details.clone(), + "default".to_string(), // DO has only one repository + Some(CommandError::new_from_safe_message(format!( + "Bad status code: `{}` returned by the DO registry API for deleting DOCR.", status, - ), + ))), )); } }, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("No response from the Digital Ocean API : {:?}", e), + return Err(EngineError::new_container_registry_delete_repository_error( + event_details.clone(), + "default".to_string(), // DO has only one repository + Some(CommandError::new( + e.to_string(), + Some("No response from the Digital Ocean API.".to_string()), + )), )); } } } pub fn exec_docr_login(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(); + let mut cmd = QoveryCommand::new( "doctl", &vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], @@ -205,18 +250,16 @@ impl DOCR { match cmd.exec() { Ok(_) => Ok(()), - Err(_) => Err(self.engine_error( - EngineErrorCause::User( - "Your DOCR account seems to be no longer valid (bad 
Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to DOCR {}", self.name_with_id()), + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, )), } } fn pull_image(&self, registry_name: String, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), vec![], dest.clone()) { + let event_details = self.get_event_details(); + + match docker_pull_image(self.kind(), vec![], dest.clone(), event_details.clone(), self.logger()) { Ok(_) => { let mut image = image.clone(); image.registry_name = Some(registry_name.clone()); @@ -225,10 +268,11 @@ impl DOCR { image.registry_url = Some(dest); Ok(PullResult::Some(image)) } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), + Err(e) => Err(EngineError::new_docker_pull_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, )), } } @@ -257,7 +301,7 @@ impl ContainerRegistry for DOCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn is_valid(&self) -> Result<(), EngineError> { Ok(()) } @@ -278,10 +322,12 @@ impl ContainerRegistry for DOCR { } fn does_image_exists(&self, image: &Image) -> bool { + let event_details = self.get_event_details(); + let registry_name = match self.get_registry_name(image) { Ok(registry_name) => registry_name, Err(err) => { - warn!("{:?}", err); + self.logger.log(LogLevel::Error, EngineEvent::Error(err, None)); return false; } }; @@ -302,18 +348,38 @@ impl ContainerRegistry for DOCR { Ok(output) => match output.status() { StatusCode::OK => output.text(), _ => { - error!( - "While tyring to get all tags for image: {}, maybe this image not exist !", - &image.name + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_container_registry_image_doesnt_exist( + event_details.clone(), + 
image.name.to_string(), + Some(CommandError::new_from_safe_message(format!( + "While tyring to get all tags for image: `{}`, maybe this image not exist !", + image.name.to_string() + ))), + ), + None, + ), ); return false; } }, Err(_) => { - error!( - "While trying to communicate with DigitalOcean API to retrieve all tags for image {}", - &image.name + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_container_registry_image_doesnt_exist( + event_details.clone(), + image.name.to_string(), + Some(CommandError::new_from_safe_message(format!( + "While trying to communicate with DigitalOcean API to retrieve all tags for image `{}`.", + image.name.to_string() + ))), + ), + None, + ), ); return false; @@ -334,9 +400,22 @@ impl ContainerRegistry for DOCR { false } Err(_) => { - error!( - "Unable to deserialize tags from DigitalOcean API for image {}", - &image.tag + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_container_registry_image_doesnt_exist( + event_details.clone(), + image.name.to_string(), + Some(CommandError::new( + out.to_string(), + Some(format!( + "Unable to deserialize tags from DigitalOcean API for image {}", + &image.tag.to_string(), + )), + )), + ), + None, + ), ); false @@ -344,9 +423,19 @@ impl ContainerRegistry for DOCR { } } _ => { - error!( - "while retrieving tags for image {} Unable to get output from DigitalOcean API", - &image.name + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + EngineError::new_container_registry_image_doesnt_exist( + event_details.clone(), + image.name.to_string(), + Some(CommandError::new_from_safe_message(format!( + "While retrieving tags for image `{}` Unable to get output from DigitalOcean API.", + image.name.to_string() + ))), + ), + None, + ), ); false @@ -355,11 +444,19 @@ impl ContainerRegistry for DOCR { } fn pull(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); let listeners_helper = 
ListenersHelper::new(&self.listeners); if !self.does_image_exists(image) { let info_message = format!("image {:?} does not exist in DOCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -374,7 +471,14 @@ impl ContainerRegistry for DOCR { } let info_message = format!("pull image {:?} from DOCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -401,11 +505,27 @@ impl ContainerRegistry for DOCR { // https://www.digitalocean.com/docs/images/container-registry/how-to/use-registry-docker-kubernetes/ fn push(&self, image: &Image, force_push: bool) -> Result { + let event_details = self.get_event_details(); let registry_name = self.get_registry_name(image)?; match self.create_repository(image) { - Ok(_) => info!("DOCR {} has been created", registry_name.as_str()), - Err(_) => warn!("DOCR {} already exists", registry_name.as_str()), + Ok(_) => self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("DOCR {} has been created", registry_name.as_str())), + ), + ), + Err(e) => self.logger.log( + LogLevel::Error, + EngineEvent::Error( + e.clone(), + Some(EventMessage::new_from_safe(format!( + "DOCR {} already exists", + registry_name.as_str() + ))), + ), + ), }; let _ = self.exec_docr_login()?; @@ -426,7 +546,13 @@ impl ContainerRegistry for DOCR { registry_name.as_str() ); - info!("{}", info_message.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + 
event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -451,6 +577,14 @@ impl ContainerRegistry for DOCR { image, registry_name ); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); + listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: image.application_id.clone(), @@ -466,6 +600,10 @@ impl ContainerRegistry for DOCR { fn push_error(&self, image: &Image) -> Result { Ok(PushResult { image: image.clone() }) } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } } impl Listen for DOCR { @@ -478,7 +616,7 @@ impl Listen for DOCR { } } -pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: &str) -> Result<(), SimpleError> { +pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: &str) -> Result<(), CommandError> { let headers = utilities::get_header_with_bearer(api_key); let cluster_ids = DoApiSubscribeToKubeCluster { cluster_uuids: vec![cluster_uuid.to_string()], @@ -496,31 +634,28 @@ pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: match res { Ok(output) => match output.status() { StatusCode::NO_CONTENT => Ok(()), - status => { - warn!("status from DO registry API {}", status); - Err(SimpleError::new(SimpleErrorKind::Other, Some("Incorrect Status received from Digital Ocean when tyring to subscribe repository to cluster"))) - } + status => Err(CommandError::new_from_safe_message( + format!("Incorrect Status `{}` received from Digital Ocean when tyring to subscribe repository to cluster", status)), + ), }, - Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to call Digital Ocean when tyring to subscribe repository to cluster"), - )) - } + Err(e) => 
Err(CommandError::new( + e.to_string(), + Some("Unable to call Digital Ocean when tyring to subscribe repository to cluster".to_string()), + )), } } - Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to Serialize digital ocean cluster uuids"), - )) - } + Err(e) => Err(CommandError::new( + e.to_string(), + Some("Unable to Serialize digital ocean cluster uuids".to_string()), + )), }; } -pub fn get_current_registry_name(api_key: &str) -> Result { +pub fn get_current_registry_name( + api_key: &str, + event_details: EventDetails, + logger: &dyn Logger, +) -> Result { let headers = utilities::get_header_with_bearer(api_key); let res = reqwest::blocking::Client::new() .get(CR_API_PATH) @@ -535,29 +670,45 @@ pub fn get_current_registry_name(api_key: &str) -> Result { match res_registry { Ok(registry) => Ok(registry.registry.name), - Err(err) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "An error occurred while deserializing JSON coming from Digital Ocean API: error: {:?}", - err + Err(err) => Err(EngineError::new_container_registry_repository_doesnt_exist( + event_details.clone(), + "default".to_string(), // DO has only one repository + Some(CommandError::new( + err.to_string(), + Some( + "An error occurred while deserializing JSON coming from Digital Ocean API.".to_string(), + ), )), )), } } status => { - warn!("status from Digital Ocean Registry API {}", status); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Incorrect Status received from Digital Ocean when tyring to get container registry"), + Err(EngineError::new_container_registry_repository_doesnt_exist( + event_details.clone(), + "default".to_string(), // DO has only one repository + Some(CommandError::new( + format!("Status: {}", status), + Some( + "Incorrect Status received from Digital Ocean when tyring to get container registry." 
+ .to_string(), + ), + )), )) } }, Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to call Digital Ocean when tyring to fetch the container registry name"), - )) + let err = EngineError::new_container_registry_repository_doesnt_exist( + event_details.clone(), + "default".to_string(), // DO has only one repository + Some(CommandError::new( + e.to_string(), + Some("Unable to call Digital Ocean when tyring to fetch the container registry name.".to_string()), + )), + ); + + logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + + Err(err) } }; } diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 9867b5de..a24ef38a 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::str::FromStr; use rusoto_core::{Client, HttpClient, Region, RusotoError}; @@ -12,9 +13,9 @@ use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; -use crate::error::{EngineError, EngineErrorCause}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; +use crate::errors::{CommandError, EngineError}; +use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; +use crate::logger::{LogLevel, Logger}; use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; @@ -32,6 +33,7 @@ pub struct ECR { secret_access_key: String, region: Region, listeners: Listeners, + logger: Box, } impl ECR { @@ -42,6 +44,7 @@ impl ECR { access_key_id: &str, secret_access_key: &str, region: &str, + logger: Box, ) -> Self { ECR { context, @@ -51,6 +54,7 @@ impl ECR { secret_access_key: secret_access_key.to_string(), region: 
Region::from_str(region).unwrap(), listeners: vec![], + logger, } } @@ -117,17 +121,27 @@ impl ECR { fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-push-ecr-image.html // docker tag e9ae3c220b23 aws_account_id.dkr.ecr.region.amazonaws.com/my-web-app + let event_details = self.get_event_details(); - match docker_tag_and_push_image(self.kind(), self.docker_envs(), &image, dest.clone(), dest_latest_tag) { + match docker_tag_and_push_image( + self.kind(), + self.docker_envs(), + &image, + dest.clone(), + dest_latest_tag, + event_details.clone(), + self.logger(), + ) { Ok(_) => { let mut image = image.clone(); image.registry_url = Some(dest); Ok(PushResult { image }) } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), + Err(e) => Err(EngineError::new_docker_push_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, )), } } @@ -135,24 +149,40 @@ impl ECR { fn pull_image(&self, dest: String, image: &Image) -> Result { // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html // docker pull aws_account_id.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest + let event_details = self.get_event_details(); - match docker_pull_image(self.kind(), self.docker_envs(), dest.clone()) { + match docker_pull_image( + self.kind(), + self.docker_envs(), + dest.clone(), + event_details.clone(), + self.logger(), + ) { Ok(_) => { let mut image = image.clone(); image.registry_url = Some(dest); Ok(PullResult::Some(image)) } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), + Err(e) => Err(EngineError::new_docker_pull_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, )), } } fn 
create_repository(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); let repository_name = image.name.as_str(); - info!("creating ECR repository {}", &repository_name); + + self.logger().log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Creating ECR repository {}", &repository_name)), + ), + ); let mut repo_creation_counter = 0; let container_registry_request = DescribeRepositoriesRequest { @@ -172,7 +202,13 @@ impl ECR { .describe_repositories(container_registry_request.clone()), ) { Ok(x) => { - debug!("created {:?} repository", x); + self.logger().log( + LogLevel::Debug, + EngineEvent::Debug( + event_details.clone(), + EventMessage::new_from_safe(format!("Created {:?} repository", x)), + ), + ); OperationResult::Ok(()) } Err(e) => { @@ -180,40 +216,78 @@ impl ECR { RusotoError::Service(s) => match s { DescribeRepositoriesError::RepositoryNotFound(_) => { if repo_creation_counter != 0 { - warn!( - "repository {} was not found, {}x retrying...", - &repository_name, &repo_creation_counter + self.logger().log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Repository {} was not found, {}x retrying...", + &repository_name, &repo_creation_counter + )), + ), ); } repo_creation_counter += 1; } - _ => warn!("{:?}", s), + _ => self.logger().log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + "Error while trying to create repository.".to_string(), + Some(format!("{:?}", s)), + ), + ), + ), }, - _ => warn!("{:?}", e), + _ => self.logger().log( + LogLevel::Warning, + EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + "Error while trying to create repository.".to_string(), + Some(format!("{:?}", e)), + ), + ), + ), } + // TODO: This behavior is weird, returning an ok message saying repository has been created in an error ... 
let msg = match block_on(self.ecr_client().create_repository(crr.clone())) { Ok(_) => format!("repository {} created", &repository_name), - Err(err) => format!( - "can't create ECR repository {} for {}. {:?}", - &repository_name, - self.name_with_id(), - err - ), + Err(err) => format!("{:?}", err), }; - OperationResult::Retry(Err(self.engine_error(EngineErrorCause::Internal, msg))) + OperationResult::Retry(Err(EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + repository_name.to_string(), + self.name_with_id(), + CommandError::new(msg.to_string(), Some("Can't create ECR repository".to_string())), + ))) } } }); match repo_created { - Ok(_) => info!( - "repository {} created after {} attempt(s)", - &repository_name, repo_creation_counter + Ok(_) => self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "repository {} created after {} attempt(s)", + &repository_name, repo_creation_counter, + )), + ), ), Err(Operation { error, .. 
}) => return error, - Err(retry::Error::Internal(e)) => return Err(self.engine_error(EngineErrorCause::Internal, e)), + Err(retry::Error::Internal(e)) => { + return Err(EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + repository_name.to_string(), + self.name_with_id(), + CommandError::new_from_safe_message(e), + )) + } }; // apply retention policy @@ -246,32 +320,30 @@ impl ECR { }; match block_on(self.ecr_client().put_lifecycle_policy(plp)) { - Err(err) => { - error!( - "can't set lifecycle policy to ECR repository {} for {}: {}", - image.name.as_str(), - self.name_with_id(), - err - ); - - Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "can't set lifecycle policy to ECR repository {} for {}", - image.name.as_str(), - self.name_with_id() - ), - )) - } + Err(err) => Err( + EngineError::new_container_registry_repository_set_lifecycle_policy_error( + event_details.clone(), + repository_name.to_string(), + CommandError::new_from_safe_message(err.to_string()), + ), + ), _ => Ok(self.get_repository(image).expect("cannot get repository")), } } fn get_or_create_repository(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); + // check if the repository already exists let repository = self.get_repository(image); if repository.is_some() { - info!("ECR repository {} already exists", image.name.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("ECR repository {} already exists", image.name.as_str())), + ), + ); return Ok(repository.unwrap()); } @@ -279,6 +351,7 @@ impl ECR { } fn get_credentials(&self) -> Result { + let event_details = self.get_event_details(); let r = block_on( self.ecr_client() .get_authorization_token(GetAuthorizationTokenRequest::default()), @@ -302,22 +375,16 @@ impl ECR { ) } None => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to retrieve 
credentials and endpoint URL from ECR {}", - self.name_with_id(), - ), + return Err(EngineError::new_container_registry_get_credentials_error( + event_details.clone(), + self.name_with_id(), )); } }, _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to retrieve credentials and endpoint URL from ECR {}", - self.name_with_id(), - ), + return Err(EngineError::new_container_registry_get_credentials_error( + event_details.clone(), + self.name_with_id(), )); } }; @@ -326,6 +393,7 @@ impl ECR { } fn exec_docker_login(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(); let credentials = self.get_credentials()?; let mut cmd = QoveryCommand::new( @@ -342,12 +410,8 @@ impl ECR { ); if let Err(_) = cmd.exec() { - return Err(self.engine_error( - EngineErrorCause::User( - "Your ECR account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to ECR {}", self.name_with_id()), + return Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details.clone(), )); }; @@ -378,32 +442,60 @@ impl ContainerRegistry for ECR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn is_valid(&self) -> Result<(), EngineError> { let client = StsClient::new_with_client(self.client(), Region::default()); let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); match s { Ok(_) => Ok(()), - Err(_) => Err(NewEngineError::new_client_invalid_cloud_provider_credentials( + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( self.get_event_details(), )), } } fn on_create(&self) -> Result<(), EngineError> { - info!("ECR.on_create() called"); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("ECR.on_create() called".to_string()), + ), + ); Ok(()) } fn 
on_create_error(&self) -> Result<(), EngineError> { + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("ECR.on_create_error() called".to_string()), + ), + ); + unimplemented!() } fn on_delete(&self) -> Result<(), EngineError> { + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("ECR.on_delete() called".to_string()), + ), + ); unimplemented!() } fn on_delete_error(&self) -> Result<(), EngineError> { + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("ECR.on_delete_error() called".to_string()), + ), + ); unimplemented!() } @@ -412,11 +504,23 @@ impl ContainerRegistry for ECR { } fn pull(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); if !self.does_image_exists(image) { - let info_message = format!("image {:?} does not exist in ECR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + let info_message = format!( + "image `{}` does not exist in ECR {} repository", + image.name_with_tag(), + self.name() + ); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -430,8 +534,19 @@ impl ContainerRegistry for ECR { return Ok(PullResult::None); } - let info_message = format!("pull image {:?} from ECR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + let info_message = format!( + "pull image `{:?}` from ECR {} repository", + image.name_with_tag(), + self.name() + ); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); 
listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -444,19 +559,7 @@ impl ContainerRegistry for ECR { let _ = self.exec_docker_login()?; - let repository = match self.get_or_create_repository(image) { - Ok(r) => r, - _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to create ECR repository for {} with image {:?}", - self.name_with_id(), - image, - ), - )); - } - }; + let repository = self.get_or_create_repository(image)?; let dest = format!("{}:{}", repository.repository_uri.unwrap(), image.tag.as_str()); @@ -467,25 +570,13 @@ impl ContainerRegistry for ECR { fn push(&self, image: &Image, force_push: bool) -> Result { let _ = self.exec_docker_login()?; - let repository = match if force_push { + let repository = if force_push { self.create_repository(image) } else { self.get_or_create_repository(image) - } { - Ok(r) => r, - _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to create ECR repository for {} with image {:?}", - self.name_with_id(), - image, - ), - )); - } - }; + }?; - let repository_uri = repository.repository_uri.unwrap(); + let repository_uri = repository.repository_uri.expect("Error getting repository URI"); let dest = format!("{}:{}", repository_uri, image.tag.as_str()); let listeners_helper = ListenersHelper::new(&self.listeners); @@ -493,12 +584,18 @@ impl ContainerRegistry for ECR { if !force_push && self.does_image_exists(image) { // check if image does exist - if yes, do not upload it again let info_message = format!( - "image {:?} found on ECR {} repository, container build is not required", - image, + "image {} found on ECR {} repository, container build is not required", + image.name_with_tag(), self.name() ); - info!("{}", info_message.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); 
listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -516,12 +613,18 @@ impl ContainerRegistry for ECR { } let info_message = format!( - "image {:?} does not exist on ECR {} repository, starting image upload", - image, + "image `{}` does not exist on ECR {} repository, starting image upload", + image.name_with_tag(), self.name() ); - info!("{}", info_message.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -540,6 +643,10 @@ impl ContainerRegistry for ECR { // TODO change this Ok(PushResult { image: image.clone() }) } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } } impl Listen for ECR { diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 7a9bdea6..f74c1dff 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; use crate::build_platform::Image; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; -use crate::errors::EngineError as NewEngineError; +use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; +use crate::logger::Logger; use crate::models::{Context, Listen, QoveryIdentifier}; pub mod docker; @@ -20,7 +20,7 @@ pub trait ContainerRegistry: Listen + ToTransmitter { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), NewEngineError>; + fn is_valid(&self) -> Result<(), EngineError>; fn on_create(&self) -> Result<(), EngineError>; fn on_create_error(&self) -> Result<(), EngineError>; fn on_delete(&self) -> Result<(), EngineError>; @@ -29,17 +29,7 @@ pub trait ContainerRegistry: Listen + ToTransmitter { fn pull(&self, image: &Image) -> Result; fn push(&self, image: &Image, 
force_push: bool) -> Result; fn push_error(&self, image: &Image) -> Result; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } + fn logger(&self) -> &dyn Logger; fn get_event_details(&self) -> EventDetails { let context = self.context(); EventDetails::new( diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 5102a145..1f8f18f9 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -1,6 +1,7 @@ extern crate scaleway_api_rs; use crate::cloud_provider::scaleway::application::ScwZone; +use std::borrow::Borrow; use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; @@ -8,9 +9,9 @@ use crate::container_registry::docker::{ docker_login, docker_manifest_inspect, docker_pull_image, docker_tag_and_push_image, }; use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; -use crate::error::{EngineError, EngineErrorCause}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; +use crate::errors::{CommandError, EngineError}; +use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; +use crate::logger::{LogLevel, Logger}; use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; @@ -29,6 +30,7 @@ pub struct ScalewayCR { secret_token: String, zone: ScwZone, listeners: Listeners, + logger: Box, } impl ScalewayCR { @@ -39,6 +41,7 @@ impl ScalewayCR { secret_token: &str, default_project_id: &str, zone: ScwZone, + logger: Box, ) -> ScalewayCR { 
ScalewayCR { context, @@ -49,6 +52,7 @@ impl ScalewayCR { secret_token: secret_token.to_string(), zone, listeners: Vec::new(), + logger, } } @@ -86,9 +90,15 @@ impl ScalewayCR { )) { Ok(res) => res.namespaces, Err(e) => { - error!( - "Error while interacting with Scaleway API (list_namespaces), error: {}, image: {}", - e, &image.name + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning( + self.get_event_details(), + EventMessage::new( + "Error while interacting with Scaleway API (list_namespaces).".to_string(), + Some(format!("error: {}, image: {}", e, &image.name)), + ), + ), ); return None; } @@ -123,9 +133,15 @@ impl ScalewayCR { )) { Ok(res) => res.images, Err(e) => { - error!( - "Error while interacting with Scaleway API (list_images), error: {}, image: {}", - e, &image.name + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning( + self.get_event_details(), + EventMessage::new( + "Error while interacting with Scaleway API (list_namespaces).".to_string(), + Some(format!("error: {}, image: {}", e, &image.name)), + ), + ), ); return None; } @@ -145,13 +161,20 @@ impl ScalewayCR { } pub fn delete_image(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); + // https://developers.scaleway.com/en/products/registry/api/#delete-67dbf7 let image_to_delete = self.get_image(image); if image_to_delete.is_none() { - let message = format!("While tyring to delete image {}, image doesn't exist", &image.name,); - error!("{}", message); + let err = EngineError::new_container_registry_image_doesnt_exist( + event_details.clone(), + image.name.to_string(), + None, + ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + + return Err(err); } let image_to_delete = image_to_delete.unwrap(); @@ -163,43 +186,61 @@ impl ScalewayCR { )) { Ok(res) => Ok(res), Err(e) => { - let message = format!( - "Error while interacting with Scaleway API 
(delete_image), error: {}, image: {}", - e, &image.name + let err = EngineError::new_container_registry_delete_image_error( + event_details.clone(), + image.name.to_string(), + Some(CommandError::new(e.to_string(), None)), ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) + self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + + Err(err) } } } fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { // https://www.scaleway.com/en/docs/deploy-an-image-from-registry-to-kubernetes-kapsule/ - match docker_tag_and_push_image(self.kind(), self.get_docker_envs(), &image, dest, dest_latest_tag) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )) - } - }; + let event_details = self.get_event_details(); + + if let Err(e) = docker_tag_and_push_image( + self.kind(), + self.get_docker_envs(), + image, + dest.to_string(), + dest_latest_tag.to_string(), + event_details.clone(), + self.logger(), + ) { + return Err(EngineError::new_docker_push_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, + )); + } let result = retry::retry(Fibonacci::from_millis(10000).take(10), || { match self.does_image_exists(image) { true => OperationResult::Ok(&image), false => { - warn!("image is not yet available on Scaleway Registry Namespace, retrying in a few seconds..."); + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning( + self.get_event_details(), + EventMessage::new_from_safe( + "Image is not yet available on Scaleway Registry Namespace, retrying in a few seconds...".to_string(), + ), + ), + ); OperationResult::Retry(()) } } }); - let image_not_reachable = Err(self.engine_error( - EngineErrorCause::Internal, - "image has been pushed on Scaleway Registry Namespace but is not yet available after 4min. 
Please try to redeploy in a few minutes".to_string(), + let image_not_reachable = Err(EngineError::new_container_registry_image_unreachable_after_push( + event_details.clone(), + image.name.to_string(), )); match result { @@ -210,16 +251,22 @@ impl ScalewayCR { } fn pull_image(&self, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), self.get_docker_envs(), dest) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), - )) - } - }; + let event_details = self.get_event_details(); + + if let Err(e) = docker_pull_image( + self.kind(), + self.get_docker_envs(), + dest.to_string(), + event_details.clone(), + self.logger(), + ) { + return Err(EngineError::new_docker_pull_image_error( + event_details, + image.name.to_string(), + dest.to_string(), + e, + )); + } Ok(PullResult::Some(image.clone())) } @@ -228,6 +275,8 @@ impl ScalewayCR { &self, image: &Image, ) -> Result { + let event_details = self.get_event_details(); + // https://developers.scaleway.com/en/products/registry/api/#post-7a8fcc match block_on(scaleway_api_rs::apis::namespaces_api::create_namespace( &self.get_configuration(), @@ -242,13 +291,17 @@ impl ScalewayCR { )) { Ok(res) => Ok(res), Err(e) => { - let message = format!( - "Error while interacting with Scaleway API (create_namespace), error: {}, image: {}", - e, &image.name + let error = EngineError::new_container_registry_namespace_creation_error( + event_details.clone(), + image.name.clone(), + self.name_with_id(), + CommandError::new(e.to_string(), Some("Can't create SCW repository".to_string())), ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) + self.logger + .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + + Err(error) } } } @@ -258,15 +311,23 @@ impl ScalewayCR { image: &Image, ) -> Result { // 
https://developers.scaleway.com/en/products/registry/api/#delete-c1ac9b + let event_details = self.get_event_details(); let registry_to_delete = self.get_registry_namespace(image); + let repository_name = match image.registry_name.as_ref() { + None => "unknown", + Some(name) => name, + }; if registry_to_delete.is_none() { - let message = format!( - "While tyring to delete registry namespace for image {}, registry namespace doesn't exist", - &image.name, + let error = EngineError::new_container_registry_repository_doesnt_exist( + event_details.clone(), + repository_name.to_string(), + None, ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + self.logger + .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + + return Err(error); } let registry_to_delete = registry_to_delete.unwrap(); @@ -278,13 +339,16 @@ impl ScalewayCR { )) { Ok(res) => Ok(res), Err(e) => { - let message = format!( - "Error while interacting with Scaleway API (delete_namespace), error: {}, image: {}", - e, &image.name + let error = EngineError::new_container_registry_delete_repository_error( + event_details.clone(), + repository_name.to_string(), + Some(CommandError::new(e.to_string(), None)), ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) + self.logger + .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + + return Err(error); } } } @@ -294,9 +358,16 @@ impl ScalewayCR { image: &Image, ) -> Result { // check if the repository already exists + let event_details = self.get_event_details(); let registry_namespace = self.get_registry_namespace(&image); if let Some(namespace) = registry_namespace { - info!("Scaleway registry namespace {} already exists", image.name.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("SCW repository {} already exists", image.name.as_str())), + ), + ); return Ok(namespace); } 
@@ -315,21 +386,20 @@ impl ScalewayCR { } fn exec_docker_login(&self, registry_url: &String) -> Result<(), EngineError> { + let event_details = self.get_event_details(); if docker_login( Kind::ScalewayCr, self.get_docker_envs(), self.login.clone(), self.secret_token.clone(), registry_url.clone(), + event_details.clone(), + self.logger(), ) .is_err() { - return Err(self.engine_error( - EngineErrorCause::User( - "Your Scaleway account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to Scaleway {}", self.name_with_id()), + return Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, )); }; @@ -360,7 +430,7 @@ impl ContainerRegistry for ScalewayCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn is_valid(&self) -> Result<(), EngineError> { Ok(()) } @@ -381,6 +451,7 @@ impl ContainerRegistry for ScalewayCR { } fn does_image_exists(&self, image: &Image) -> bool { + let event_details = self.get_event_details(); let registry_url = image .registry_url .as_ref() @@ -393,6 +464,8 @@ impl ContainerRegistry for ScalewayCR { self.login.clone(), self.secret_token.clone(), registry_url.clone(), + event_details.clone(), + self.logger(), ) { return false; } @@ -403,11 +476,14 @@ impl ContainerRegistry for ScalewayCR { image.name.clone(), image.tag.clone(), registry_url, + event_details.clone(), + self.logger(), ) - .is_some() + .is_ok() } fn pull(&self, image: &Image) -> Result { + let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); let mut image = image.clone(); @@ -415,9 +491,15 @@ impl ContainerRegistry for ScalewayCR { match self.get_or_create_registry_namespace(&image) { Ok(registry) => { - info!( - "Scaleway registry namespace for {} has been created", - image.name.as_str() + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + 
event_details.clone(), + EventMessage::new_from_safe(format!( + "Scaleway registry namespace for {} has been created", + image.name.as_str() + )), + ), ); image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name image.registry_url = registry.endpoint.clone(); @@ -426,18 +508,21 @@ impl ContainerRegistry for ScalewayCR { registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string()); } Err(e) => { - error!( - "Scaleway registry namespace for {} cannot be created, error: {:?}", - image.name.as_str(), - e - ); + self.logger.log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); return Err(e); } } if !self.does_image_exists(&image) { - let info_message = format!("image {:?} does not exist in SCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + let info_message = format!("Image {:?} does not exist in SCR {} repository", image, self.name()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -452,7 +537,14 @@ impl ContainerRegistry for ScalewayCR { } let info_message = format!("pull image {:?} from SCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); + + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -472,16 +564,13 @@ impl ContainerRegistry for ScalewayCR { } fn push(&self, image: &Image, force_push: bool) -> Result { + let event_details = self.get_event_details(); let mut image = image.clone(); let registry_url: String; let registry_name: String; match self.get_or_create_registry_namespace(&image) { Ok(registry) => { - info!( - "Scaleway registry namespace 
for {} has been created", - image.name.as_str() - ); image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name image.registry_url = registry.endpoint.clone(); image.registry_secret = Some(self.secret_token.clone()); @@ -490,11 +579,7 @@ impl ContainerRegistry for ScalewayCR { registry_name = registry.name.unwrap(); } Err(e) => { - error!( - "Scaleway registry namespace for {} cannot be created, error: {:?}", - image.name.as_str(), - e - ); + self.logger.log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); return Err(e); } } @@ -512,7 +597,13 @@ impl ContainerRegistry for ScalewayCR { image, registry_name, ); - info!("{}", info_message.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -532,7 +623,13 @@ impl ContainerRegistry for ScalewayCR { self.name() ); - info!("{}", info_message.as_str()); + self.logger.log( + LogLevel::Info, + EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(info_message.to_string()), + ), + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -550,6 +647,10 @@ impl ContainerRegistry for ScalewayCR { fn push_error(&self, image: &Image) -> Result { Ok(PushResult { image: image.clone() }) } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } } impl Listen for ScalewayCR { diff --git a/src/errors/io.rs b/src/errors/io.rs index 8577a365..529cc9f1 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -99,6 +99,14 @@ pub enum Tag { DockerPushImageError, DockerPullImageError, BuilderDockerCannotListImages, + ContainerRegistryRepositoryCreationError, + ContainerRegistryRepositorySetLifecycleError, + ContainerRegistryGetCredentialsError, + ContainerRegistryImageDoesntExist, + 
ContainerRegistryImageUnreachableAfterPush, + ContainerRegistryRepositoryDoesntExist, + ContainerRegistryDeleteRepositoryError, + ContainerRegistryDeleteImageError, } impl From for Tag { @@ -187,6 +195,16 @@ impl From for Tag { errors::Tag::BuilderCloningRepositoryError => Tag::BuilderCloningRepositoryError, errors::Tag::DockerPushImageError => Tag::DockerPushImageError, errors::Tag::DockerPullImageError => Tag::DockerPullImageError, + errors::Tag::ContainerRegistryRepositoryCreationError => Tag::ContainerRegistryRepositoryCreationError, + errors::Tag::ContainerRegistryRepositorySetLifecycleError => { + Tag::ContainerRegistryRepositorySetLifecycleError + } + errors::Tag::ContainerRegistryGetCredentialsError => Tag::ContainerRegistryGetCredentialsError, + errors::Tag::ContainerRegistryDeleteImageError => Tag::ContainerRegistryDeleteImageError, + errors::Tag::ContainerRegistryImageDoesntExist => Tag::ContainerRegistryImageDoesntExist, + errors::Tag::ContainerRegistryImageUnreachableAfterPush => Tag::ContainerRegistryImageUnreachableAfterPush, + errors::Tag::ContainerRegistryRepositoryDoesntExist => Tag::ContainerRegistryRepositoryDoesntExist, + errors::Tag::ContainerRegistryDeleteRepositoryError => Tag::ContainerRegistryDeleteRepositoryError, errors::Tag::BuilderDockerCannotListImages => Tag::BuilderDockerCannotListImages, } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 3cbc4d99..a409ae8f 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -3,6 +3,7 @@ pub mod io; extern crate url; use crate::cloud_provider::utilities::VersionsNumber; +use crate::cmd; use crate::cmd::helm::HelmError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; @@ -55,6 +56,17 @@ impl CommandError { } } + /// Creates a new CommandError from legacy command error. 
+ pub fn new_from_legacy_command_error( + legacy_command_error: cmd::command::CommandError, + safe_message: Option, + ) -> Self { + CommandError { + message_raw: legacy_command_error.to_string(), + message_safe: safe_message, + } + } + /// Create a new CommandError from a CMD command. pub fn new_from_command_line( message: String, @@ -245,6 +257,22 @@ pub enum Tag { DockerPushImageError, /// DockerPullImageError: represents an error when trying to pull a docker image. DockerPullImageError, + /// ContainerRegistryRepositoryCreationError: represents an error when trying to create a repository. + ContainerRegistryRepositoryCreationError, + /// ContainerRegistryRepositorySetLifecycleError: represents an error when trying to set repository lifecycle policy. + ContainerRegistryRepositorySetLifecycleError, + /// ContainerRegistryGetCredentialsError: represents an error when trying to get container registry credentials. + ContainerRegistryGetCredentialsError, + /// ContainerRegistryDeleteImageError: represents an error while trying to delete an image. + ContainerRegistryDeleteImageError, + /// ContainerRegistryImageDoesntExist: represents an error, image doesn't exist in the registry. + ContainerRegistryImageDoesntExist, + /// ContainerRegistryImageUnreachableAfterPush: represents an error when image has been pushed but is unreachable. + ContainerRegistryImageUnreachableAfterPush, + /// ContainerRegistryRepositoryDoesntExist: represents an error, repository doesn't exist. + ContainerRegistryRepositoryDoesntExist, + /// ContainerRegistryDeleteRepositoryError: represents an error while trying to delete a repository. + ContainerRegistryDeleteRepositoryError, } #[derive(Clone, Debug)] @@ -2393,6 +2421,215 @@ impl EngineError { ) } + /// Creates new error when trying to create a new container registry namespace. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Container repository name. 
+ /// * `registry_name`: Registry to be created. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_namespace_creation_error( + event_details: EventDetails, + repository_name: String, + registry_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error, trying to create registry `{}` in `{}`.", + registry_name, repository_name + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositoryCreationError, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + + /// Creates new error when trying to set container repository lifecycle policy. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_repository_set_lifecycle_policy_error( + event_details: EventDetails, + repository_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error, trying to set lifecycle policy repository `{}`.", + repository_name, + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositorySetLifecycleError, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + + /// Creates new error when trying to get container registry credentials. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + pub fn new_container_registry_get_credentials_error( + event_details: EventDetails, + repository_name: String, + ) -> EngineError { + let message = format!( + "Failed to retrieve credentials and endpoint URL from container registry `{}`.", + repository_name, + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryGetCredentialsError, + message.to_string(), + message.to_string(), + None, + None, + None, + ) + } + + /// Creates new error when trying to delete an image. 
+ /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_delete_image_error( + event_details: EventDetails, + image_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Failed to delete image `{}`.", image_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryDeleteImageError, + message.to_string(), + message.to_string(), + raw_error, + None, + None, + ) + } + + /// Creates new error when trying to get image from a registry. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. + pub fn new_container_registry_image_doesnt_exist( + event_details: EventDetails, + image_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Image `{}` doesn't exists.", image_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryImageDoesntExist, + message.to_string(), + message.to_string(), + raw_error, + None, + None, + ) + } + + /// Creates new error when image is unreachable after push. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_image_unreachable_after_push( + event_details: EventDetails, + image_name: String, + ) -> EngineError { + let message = format!( + "Image `{}` has been pushed on registry namespace but is not yet available after some time.", + image_name, + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryImageUnreachableAfterPush, + message.to_string(), + message.to_string(), + None, + None, + Some("Please try to redeploy in a few minutes.".to_string()), + ) + } + + /// Creates new error when trying to get image from a registry. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. 
+ /// * `repository_name`: Repository name. + pub fn new_container_registry_repository_doesnt_exist( + event_details: EventDetails, + repository_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Repository `{}` doesn't exists.", repository_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositoryDoesntExist, + message.to_string(), + message.to_string(), + raw_error, + None, + None, + ) + } + + /// Creates new error when trying to delete repository. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_delete_repository_error( + event_details: EventDetails, + repository_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Failed to delete repository `{}`.", repository_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryDeleteRepositoryError, + message.to_string(), + message.to_string(), + raw_error, + None, + None, + ) + } + /// Creates new error when trying to list Docker images. 
/// /// Arguments: diff --git a/src/transaction.rs b/src/transaction.rs index 3f69c2dc..82f3cb72 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -113,14 +113,17 @@ impl<'a> Transaction<'a> { let _ = match container_registry.pull(&image) { Ok(pull_result) => pull_result, Err(err) => { - warn!( - "{}", - err.message.clone().unwrap_or(format!( - "something goes wrong while pulling image from {:?} container registry", - container_registry.kind() - )) + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + err.clone(), + Some(EventMessage::new_from_safe( + "Something goes wrong while pulling image from container registry".to_string(), + )), + ), ); - return Err(EngineError::new_from_legacy_engine_error(err)); + + return Err(err); } }; @@ -208,8 +211,15 @@ impl<'a> Transaction<'a> { match result { Ok(tuple) => results.push(tuple), Err(err) => { - error!("error pushing docker image {:?}", err); - return Err(EngineError::new_from_legacy_engine_error(err)); + self.logger.log( + LogLevel::Error, + EngineEvent::Error( + err.clone(), + Some(EventMessage::new_from_safe("Error pushing docker image".to_string())), + ), + ); + + return Err(err); } } } diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 9241663c..a98c7d98 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -21,7 +21,7 @@ use tracing::error; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; -use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, logger, FuncTestsSecrets}; pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3; pub const AWS_TEST_REGION: AwsRegion = AwsRegion::EuWest3; @@ -50,6 +50,7 @@ pub fn container_registry_ecr(context: &Context) -> ECR { secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(), secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(), secrets.AWS_DEFAULT_REGION.unwrap().as_str(), + logger(), 
) } @@ -60,6 +61,7 @@ pub fn container_registry_docker_hub(context: &Context) -> DockerHub { "my-default-docker-hub", "qoveryrd", "3b9481fe-74e7-4d7b-bc08-e147c9fd4f24", + logger(), ) } diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 38c7aadc..2ac5c132 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -7,17 +7,17 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::error::EngineError; use qovery_engine::models::{Context, Environment}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; -use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, logger, FuncTestsSecrets}; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1; @@ -38,6 +38,7 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { DOCR_ID, "default-docr-registry-qovery-do-test", secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), + logger(), ) } @@ -153,6 +154,7 @@ pub fn clean_environments( .DIGITAL_OCEAN_TOKEN .as_ref() .expect("DIGITAL_OCEAN_TOKEN is not set in secrets"), + logger(), ); // delete images created in registry diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 6dd3a0c7..2ccd5205 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -6,19 +6,19 @@ use 
qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::error::EngineError; use qovery_engine::models::{Context, Environment}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; -use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, generate_id, logger, FuncTestsSecrets}; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Scw; use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; use tracing::error; @@ -57,6 +57,7 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { scw_secret_key.as_str(), scw_default_project_id.as_str(), SCW_TEST_ZONE, + logger(), ) } @@ -213,6 +214,7 @@ pub fn clean_environments( secret_token.as_str(), project_id.as_str(), zone, + logger(), ); // delete images created in registry From d5793b82f2573b9daa507ea2a3907be4a5e6def9 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 11 Mar 2022 16:11:36 +0100 Subject: [PATCH 10/85] feat: migrate DNS providers to new logging (#639) Ticket: ENG-1137 --- src/dns_provider/mod.rs | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/src/dns_provider/mod.rs b/src/dns_provider/mod.rs index 7d140468..c8233c78 100644 --- a/src/dns_provider/mod.rs +++ b/src/dns_provider/mod.rs @@ -1,10 +1,9 @@ use std::net::Ipv4Addr; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, EventDetails, Stage, 
ToTransmitter}; use serde::{Deserialize, Serialize}; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::models::{Context, Domain, QoveryIdentifier}; pub mod cloudflare; @@ -22,18 +21,7 @@ pub trait DnsProvider: ToTransmitter { fn token(&self) -> &str; fn domain(&self) -> &Domain; fn resolvers(&self) -> Vec; - fn is_valid(&self) -> Result<(), NewEngineError>; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::DnsProvider(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } + fn is_valid(&self) -> Result<(), EngineError>; fn get_event_details(&self) -> EventDetails { let context = self.context(); EventDetails::new( From 2d3711a464c64c42f2fc71765be1d391475e8280 Mon Sep 17 00:00:00 2001 From: enzo Date: Thu, 10 Mar 2022 16:58:01 +0100 Subject: [PATCH 11/85] fix: kubeconfig issues in tests --- .../digitalocean/kubernetes/mod.rs | 45 +--- src/cloud_provider/scaleway/kubernetes/mod.rs | 44 +--- test_utilities/src/aws.rs | 25 +- test_utilities/src/cloudflare.rs | 4 +- test_utilities/src/common.rs | 242 ++++++++++-------- test_utilities/src/digitalocean.rs | 26 +- test_utilities/src/scaleway.rs | 26 +- tests/aws/aws_databases.rs | 64 ++++- tests/aws/aws_environment.rs | 168 +++++++++--- tests/aws/aws_kubernetes.rs | 30 +-- tests/aws/aws_whole_enchilada.rs | 3 +- tests/digitalocean/do_databases.rs | 59 ++++- tests/digitalocean/do_environment.rs | 177 ++++++++++--- tests/digitalocean/do_kubernetes.rs | 8 +- ...do_utility_kubernetes_doks_test_cluster.rs | 5 +- tests/digitalocean/do_whole_enchilada.rs | 3 +- tests/scaleway/scw_databases.rs | 60 ++++- tests/scaleway/scw_environment.rs | 195 +++++++++++--- 
tests/scaleway/scw_kubernetes.rs | 20 +- ...utility_kubernetes_kapsule_test_cluster.rs | 5 +- tests/scaleway/scw_whole_enchilada.rs | 3 +- 21 files changed, 824 insertions(+), 388 deletions(-) diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index b9364d63..a807cdb0 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -653,17 +653,13 @@ impl DOKS { } // push config file to object storage + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); let kubeconfig_name = format!("{}.yaml", self.id()); if let Err(e) = self.spaces.put( self.kubeconfig_bucket_name().as_str(), kubeconfig_name.as_str(), - format!( - "{}/{}/{}", - temp_dir.as_str(), - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str() - ) - .as_str(), + kubeconfig_path.to_str().expect("No path for Kubeconfig"), ) { let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( event_details.clone(), @@ -696,9 +692,6 @@ impl DOKS { }; // kubernetes helm deployments on the cluster - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - let credentials_environment_variables: Vec<(String, String)> = self .cloud_provider .credentials_environment_variables() @@ -911,7 +904,7 @@ impl DOKS { fn delete(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; + let skip_kubernetes_step = false; self.send_to_customer( format!("Preparing to delete DOKS cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -961,23 +954,6 @@ impl DOKS { )); } - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let 
safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); - - skip_kubernetes_step = true; - "".to_string() - } - }; - // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process let message = format!( @@ -1009,6 +985,9 @@ impl DOKS { ); }; + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); + if !skip_kubernetes_step { // should make the diff between all namespaces and qovery managed namespaces let message = format!( @@ -1023,7 +1002,7 @@ impl DOKS { self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), ); @@ -1042,7 +1021,7 @@ impl DOKS { for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { @@ -1101,7 +1080,7 @@ impl DOKS { // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new( - &kubernetes_config_file_path, + &kubeconfig_path, &self.cloud_provider.credentials_environment_variables(), ) .map_err(|e| to_engine_error(&event_details, e))?; @@ -1111,7 +1090,7 @@ impl DOKS { // required to avoid namespace stuck on deletion uninstall_cert_manager( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), event_details.clone(), self.logger(), @@ -1165,7 +1144,7 @@ impl DOKS { for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - 
&kubernetes_config_file_path, + &kubeconfig_path, qovery_namespace, self.cloud_provider().credentials_environment_variables(), ); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index a708edae..cfa0bea2 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -786,17 +786,13 @@ impl Kapsule { } // push config file to object storage + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); let kubeconfig_name = format!("{}.yaml", self.id()); if let Err(e) = self.object_storage.put( self.kubeconfig_bucket_name().as_str(), kubeconfig_name.as_str(), - format!( - "{}/{}/{}", - temp_dir.as_str(), - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str() - ) - .as_str(), + kubeconfig_path.to_str().expect("No path for Kubeconfig"), ) { let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( event_details.clone(), @@ -1015,9 +1011,6 @@ impl Kapsule { }; // kubernetes helm deployments on the cluster - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - let credentials_environment_variables: Vec<(String, String)> = self .cloud_provider .credentials_environment_variables() @@ -1325,7 +1318,7 @@ impl Kapsule { fn delete(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; + let skip_kubernetes_step = false; self.send_to_customer( format!("Preparing to delete SCW cluster {} with id {}", self.name(), self.id()).as_str(), @@ -1372,22 +1365,6 @@ impl Kapsule { )); } - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let safe_message = "Skipping Kubernetes uninstall because it can't be 
reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); - skip_kubernetes_step = true; - "".to_string() - } - }; - // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process let message = format!( @@ -1419,6 +1396,9 @@ impl Kapsule { ); }; + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); + if !skip_kubernetes_step { // should make the diff between all namespaces and qovery managed namespaces let message = format!( @@ -1433,7 +1413,7 @@ impl Kapsule { self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), ); @@ -1452,7 +1432,7 @@ impl Kapsule { for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { @@ -1511,7 +1491,7 @@ impl Kapsule { // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new( - &kubernetes_config_file_path, + &kubeconfig_path, &self.cloud_provider.credentials_environment_variables(), ) .map_err(|e| to_engine_error(&event_details, e))?; @@ -1521,7 +1501,7 @@ impl Kapsule { // required to avoid namespace stuck on deletion uninstall_cert_manager( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), event_details.clone(), self.logger(), @@ -1575,7 +1555,7 @@ impl Kapsule { for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, qovery_namespace, 
self.cloud_provider().credentials_environment_variables(), ); diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index a98c7d98..4ef23bc8 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -65,8 +65,26 @@ pub fn container_registry_docker_hub(context: &Context) -> DockerHub { ) } +pub fn aws_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + AWS::docker_cr_engine( + &context, + logger, + AWS_TEST_REGION.to_string().as_str(), + AWS_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, + ) +} + impl Cluster for AWS { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use ECR let container_registry = Box::new(container_registry_ecr(context)); @@ -75,8 +93,7 @@ impl Cluster for AWS { // use AWS let cloud_provider: Arc> = Arc::new(AWS::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let k = get_environment_test_kubernetes( Aws, @@ -84,6 +101,8 @@ impl Cluster for AWS { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), ); EngineConfig::new( diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs index ab8a96f9..037b2456 100644 --- a/test_utilities/src/cloudflare.rs +++ b/test_utilities/src/cloudflare.rs @@ -4,10 +4,10 @@ use qovery_engine::dns_provider::cloudflare::Cloudflare; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::models::{Context, Domain}; -pub fn dns_provider_cloudflare(context: &Context, domain: ClusterDomain) -> Box { +pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box { let secrets 
= FuncTestsSecrets::new(); let domain = Domain::new(match domain { - ClusterDomain::Custom(domain) => domain, + ClusterDomain::Custom(domain) => domain.to_string(), ClusterDomain::Default => secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set"), }); Box::new(Cloudflare::new( diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index f2e23f4f..f2fa93e4 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -10,13 +10,12 @@ use qovery_engine::models::{ GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; -use crate::aws::AWS_KUBERNETES_VERSION; -use crate::cloudflare::dns_provider_cloudflare; -use crate::digitalocean::DO_KUBERNETES_VERSION; -use crate::scaleway::SCW_KUBERNETES_VERSION; +use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; +use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION}; +use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use crate::utilities::{ - db_disk_type, db_infos, db_instance_type, generate_cluster_id, generate_id, generate_password, get_pvc, get_svc, - get_svc_name, init, FuncTestsSecrets, + db_disk_type, db_infos, db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init, + FuncTestsSecrets, }; use base64; use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; @@ -55,7 +54,14 @@ pub enum ClusterDomain { } pub trait Cluster { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig; + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig; fn cloud_provider(context: &Context) -> Box; fn kubernetes_nodes() -> Vec; fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option) -> U; @@ -68,6 +74,7 @@ pub trait Infrastructure { context: &Context, environment_action: &EnvironmentAction, logger: Box, + engine_config: 
&EngineConfig, ) -> TransactionResult; fn pause_environment( &self, @@ -75,6 +82,7 @@ pub trait Infrastructure { context: &Context, environment_action: &EnvironmentAction, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult; fn delete_environment( &self, @@ -82,6 +90,7 @@ pub trait Infrastructure { context: &Context, environment_action: &EnvironmentAction, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult; } @@ -92,14 +101,9 @@ impl Infrastructure for Environment { context: &Context, environment_action: &EnvironmentAction, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(context, logger.clone()), - }; - - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let _ = tx.deploy_environment_with_options( &environment_action, DeploymentOption { @@ -117,14 +121,9 @@ impl Infrastructure for Environment { context: &Context, environment_action: &EnvironmentAction, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(context, logger.clone()), - }; - - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let _ = tx.pause_environment(&environment_action); tx.commit() @@ -136,14 +135,9 @@ impl Infrastructure for Environment { context: &Context, environment_action: 
&EnvironmentAction, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(context, logger.clone()), - }; - - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let _ = tx.delete_environment(&environment_action); tx.commit() @@ -1082,7 +1076,40 @@ pub fn test_db( let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(provider_kind.clone(), &context, &ea, logger.clone()); + let (localisation, kubernetes_version) = match provider_kind { + Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), + Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), + Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + }; + + let engine_config = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment.deploy_environment(provider_kind.clone(), &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match database_mode.clone() { @@ -1145,7 +1172,13 @@ pub fn test_db( } } - let ret = 
environment_delete.delete_environment(provider_kind.clone(), &context_for_delete, &ea_delete, logger); + let ret = environment_delete.delete_environment( + provider_kind.clone(), + &context_for_delete, + &ea_delete, + logger, + &engine_config, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1157,27 +1190,24 @@ pub fn get_environment_test_kubernetes<'a>( cloud_provider: Arc>, dns_provider: Arc>, logger: Box, + localisation: &str, + kubernetes_version: &str, ) -> Box { let secrets = FuncTestsSecrets::new(); let k: Box; match provider_kind { Kind::Aws => { - let region = secrets - .AWS_DEFAULT_REGION - .as_ref() - .expect("AWS_DEFAULT_REGION is not set") - .as_str(); - let aws_region = AwsRegion::from_str(region).expect("wrong AWS region name, please ensure it's correct"); + let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); k = Box::new( EKS::new( context.clone(), context.cluster_id(), uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()).as_str(), - AWS_KUBERNETES_VERSION, - aws_region.clone(), - aws_region.get_zones_to_string(), + kubernetes_version, + region.clone(), + region.get_zones_to_string(), cloud_provider, dns_provider, AWS::kubernetes_cluster_options(secrets.clone(), None), @@ -1188,6 +1218,7 @@ pub fn get_environment_test_kubernetes<'a>( ); } Kind::Do => { + let region = DoRegion::from_str(localisation).expect("DO region not supported"); k = Box::new( DOKS::new( context.clone(), @@ -1195,14 +1226,7 @@ pub fn get_environment_test_kubernetes<'a>( uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()), DO_KUBERNETES_VERSION.to_string(), - DoRegion::from_str( - secrets - .clone() - .DIGITAL_OCEAN_DEFAULT_REGION - .expect("DIGITAL_OCEAN_DEFAULT_REGION is not set") - .as_str(), - ) - .unwrap(), + region, cloud_provider, dns_provider, DO::kubernetes_nodes(), @@ -1213,6 +1237,7 @@ pub fn get_environment_test_kubernetes<'a>( ); } Kind::Scw => { + let zone = 
ScwZone::from_str(localisation).expect("SCW zone not supported"); k = Box::new( Kapsule::new( context.clone(), @@ -1220,14 +1245,7 @@ pub fn get_environment_test_kubernetes<'a>( uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()), SCW_KUBERNETES_VERSION.to_string(), - ScwZone::from_str( - secrets - .clone() - .SCALEWAY_DEFAULT_REGION - .expect("SCALEWAY_DEFAULT_REGION is not set") - .as_str(), - ) - .unwrap(), + zone, cloud_provider, dns_provider, Scaleway::kubernetes_nodes(), @@ -1330,11 +1348,10 @@ pub fn cluster_test( logger: Box, localisation: &str, aws_zones: Option>, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, - cluster_domain: ClusterDomain, + cluster_domain: &ClusterDomain, vpc_network_mode: Option, environment_to_deploy: Option<&EnvironmentAction>, ) -> String { @@ -1342,15 +1359,33 @@ pub fn cluster_test( let span = span!(Level::INFO, "test", name = test_name); let _enter = span.enter(); - - let cluster_id = generate_cluster_id(localisation.clone()); - let cluster_name = generate_cluster_id(localisation.clone()); let boot_version = format!("{}.{}", major_boot_version, minor_boot_version.clone()); let engine = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(&context, logger.clone()), - Kind::Do => DO::docker_cr_engine(&context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(&context, logger.clone()), + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), }; let mut deploy_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); 
let mut delete_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); @@ -1362,28 +1397,6 @@ pub fn cluster_test( } }; - let dns_provider = Arc::new(dns_provider_cloudflare(&context, cluster_domain)); - let cp: Arc> = match provider_kind { - Kind::Aws => Arc::new(AWS::cloud_provider(&context)), - Kind::Do => Arc::new(DO::cloud_provider(&context)), - Kind::Scw => Arc::new(Scaleway::cloud_provider(&context)), - }; - - let kubernetes = get_cluster_test_kubernetes( - provider_kind.clone(), - secrets.clone(), - &context, - cluster_id.clone(), - cluster_name.clone(), - boot_version.clone(), - localisation.clone(), - aws_zones.clone(), - cp.clone(), - dns_provider.clone(), - vpc_network_mode.clone(), - logger.clone(), - ); - // Deploy if let Err(err) = deploy_tx.create_kubernetes() { panic!("{:?}", err) @@ -1404,10 +1417,11 @@ pub fn cluster_test( } if let Err(err) = metrics_server_test( - kubernetes + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - kubernetes.cloud_provider().credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } @@ -1433,30 +1447,43 @@ pub fn cluster_test( assert!(matches!(resume_tx.commit(), TransactionResult::Ok)); if let Err(err) = metrics_server_test( - kubernetes + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - kubernetes.cloud_provider().credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } } ClusterTestType::WithUpgrade => { let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version.clone() + 1); - let upgraded_kubernetes = get_cluster_test_kubernetes( - provider_kind.clone(), - secrets.clone(), - &context, - cluster_id.clone(), - cluster_name.clone(), - upgrade_to_version.clone(), - localisation.clone(), - aws_zones, - cp, 
- dns_provider, - vpc_network_mode.clone(), - logger.clone(), - ); + let engine = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + }; let mut upgrade_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut delete_tx = @@ -1469,14 +1496,11 @@ pub fn cluster_test( assert!(matches!(upgrade_tx.commit(), TransactionResult::Ok)); if let Err(err) = metrics_server_test( - upgraded_kubernetes - .as_ref() + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - upgraded_kubernetes - .as_ref() - .cloud_provider() - .credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 2ac5c132..48bc9b86 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -1,5 +1,6 @@ use const_format::formatcp; use qovery_engine::build_platform::Image; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; use qovery_engine::cloud_provider::digitalocean::DO; @@ -42,8 +43,26 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { ) } +pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + DO::docker_cr_engine( + &context, + logger, + 
DO_TEST_REGION.to_string().as_str(), + DO_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, + ) +} + impl Cluster for DO { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use DigitalOcean Container Registry let container_registry = Box::new(container_registry_digital_ocean(context)); // use LocalDocker @@ -51,8 +70,7 @@ impl Cluster for DO { // use Digital Ocean let cloud_provider: Arc> = Arc::new(Self::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let k = get_environment_test_kubernetes( Do, @@ -60,6 +78,8 @@ impl Cluster for DO { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), ); EngineConfig::new( diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 2ccd5205..e034f26d 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -14,6 +14,7 @@ use crate::cloudflare::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, generate_id, logger, FuncTestsSecrets}; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Scw; @@ -61,8 +62,26 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { ) } +pub fn scw_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + Scaleway::docker_cr_engine( + &context, + logger, + SCW_TEST_ZONE.to_string().as_str(), + 
SCW_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, + ) +} + impl Cluster for Scaleway { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use Scaleway CR let container_registry = Box::new(container_registry_scw(context)); @@ -71,8 +90,7 @@ impl Cluster for Scaleway { // use Scaleway let cloud_provider: Arc> = Arc::new(Self::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let cluster = get_environment_test_kubernetes( Scw, @@ -80,6 +98,8 @@ impl Cluster for Scaleway { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), ); EngineConfig::new( diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 13b53f17..2a408b15 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -5,15 +5,17 @@ use qovery_engine::cloud_provider::Kind; use qovery_engine::models::{ Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, }; +use test_utilities::aws::{aws_default_engine_config, AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; use tracing::{span, Level}; use self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TYPE}; use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; +use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; -use test_utilities::common::{test_db, Infrastructure}; +use test_utilities::common::{test_db, 
Cluster, ClusterDomain, Infrastructure}; /** ** @@ -47,7 +49,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -64,10 +68,16 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -99,7 +109,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -117,10 +129,10 @@ fn deploy_an_environment_with_db_and_pause_it() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = 
environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.pause_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -135,7 +147,13 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -168,7 +186,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -195,7 +215,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -205,7 
+225,13 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_for_deletion, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_for_deletion, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -238,8 +264,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_redeploy = context.clone_not_same_execution_id(); + let engine_config_for_redeploy = aws_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -314,11 +343,16 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = - environment_to_redeploy.deploy_environment(Kind::Aws, &context_for_redeploy, &ea_redeploy, logger.clone()); + let ret = environment_to_redeploy.deploy_environment( + Kind::Aws, + &context_for_redeploy, + &ea_redeploy, + logger.clone(), + &engine_config_for_redeploy, + ); assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY @@ -334,7 +368,13 @@ fn 
postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 1de17677..50fa14bc 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -13,7 +13,7 @@ use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, S use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::time::SystemTime; -use test_utilities::aws::container_registry_ecr; +use test_utilities::aws::{aws_default_engine_config, container_registry_ecr, AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; use test_utilities::utilities::{build_platform_local_docker, context, init, kubernetes_config_path}; use tracing::{span, Level}; @@ -45,7 +45,10 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); + let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -62,10 +65,16 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); 
assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_for_delete.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -95,6 +104,7 @@ fn test_build_cache() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger()); let environment = test_utilities::common::working_minimal_environment( &context, @@ -187,7 +197,10 @@ fn deploy_a_working_environment_and_pause_it_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); + let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -200,7 +213,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.clone().applications[0].id); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -213,7 +226,13 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment(Kind::Aws, &context_for_delete, &ea, logger.clone()); + let ret = environment.pause_environment( + Kind::Aws, + &context_for_delete, + &ea, + logger.clone(), + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); // Check 
that we have actually 0 pods running for this app @@ -264,7 +283,8 @@ fn deploy_a_working_environment_and_pause_it_eks() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let ret = environment.deploy_environment(Kind::Aws, &ctx_resume, &ea, logger.clone()); + let engine_config_resume = aws_default_engine_config(&ctx_resume, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &ctx_resume, &ea, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -317,7 +337,8 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert!(filtered_pdb); // Cleanup - let ret = environment.delete_environment(Kind::Aws, &context_for_delete, &ea, logger); + let ret = + environment.delete_environment(Kind::Aws, &context_for_delete, &ea, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -348,7 +369,9 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -365,10 +388,16 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); 
+ let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -403,7 +432,9 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -437,10 +468,16 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -472,7 +509,9 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -506,10 +545,16 @@ 
fn build_worker_with_buildpacks_and_deploy_a_working_environment() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -541,7 +586,9 @@ fn deploy_a_working_environment_with_domain() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -556,10 +603,16 @@ fn deploy_a_working_environment_with_domain() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return 
test_name.to_string(); @@ -591,7 +644,9 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -625,7 +680,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { @@ -636,7 +691,13 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { Err(_) => assert!(false), }; - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -669,8 +730,11 @@ fn redeploy_same_app_with_ebs() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = aws_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = 
test_utilities::common::working_minimal_environment( &context, @@ -707,7 +771,7 @@ fn redeploy_same_app_with_ebs() { let ea2 = EnvironmentAction::Environment(environment_redeploy.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { @@ -727,7 +791,8 @@ fn redeploy_same_app_with_ebs() { secrets.clone(), ); - let ret = environment_redeploy.deploy_environment(Kind::Aws, &context_bis, &ea2, logger.clone()); + let ret = + environment_redeploy.deploy_environment(Kind::Aws, &context_bis, &ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -739,7 +804,13 @@ fn redeploy_same_app_with_ebs() { ); //nothing change in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Aws, + &context_for_deletion, + &ea_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -771,8 +842,11 @@ fn deploy_a_not_working_environment_and_after_working_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = aws_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, 
logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -808,13 +882,20 @@ fn deploy_a_not_working_environment_and_after_working_environment() { &context_for_not_working, &ea_not_working, logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_for_delete.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -850,6 +931,7 @@ fn deploy_ok_fail_fail_ok_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -860,6 +942,7 @@ fn deploy_ok_fail_fail_ok_environment() { // not working 1 let context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = aws_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -875,10 +958,12 @@ fn deploy_ok_fail_fail_ok_environment() { // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = aws_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let 
engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; @@ -888,7 +973,7 @@ fn deploy_ok_fail_fail_ok_environment() { let ea_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // FAIL and rollback @@ -897,6 +982,7 @@ fn deploy_ok_fail_fail_ok_environment() { &context_for_not_working_1, &ea_not_working_1, logger.clone(), + &engine_config_for_not_working_1, ); assert!(matches!( ret, @@ -909,6 +995,7 @@ fn deploy_ok_fail_fail_ok_environment() { &context_for_not_working_2, &ea_not_working_2, logger.clone(), + &engine_config_for_not_working_2, ); assert!(matches!( ret, @@ -916,10 +1003,16 @@ fn deploy_ok_fail_fail_ok_environment() { )); // Should be working - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = delete_env.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -951,6 +1044,7 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -960,16 +1054,23 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { ); let context_for_delete = 
context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(delete_env.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = delete_env.delete_environment( + Kind::Aws, + &context_for_delete, + &ea_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1001,7 +1102,9 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { .expect("AWS_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -1017,14 +1120,19 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Aws, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // checking if cookie is properly set on the app 
assert!(routers_sessions_are_sticky(environment.routers.clone())); - let ret = - environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &env_action_for_delete, logger); + let ret = environment_for_delete.delete_environment( + Kind::Aws, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); test_name.to_string() diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index da79ca77..e09c5ddf 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -9,14 +9,13 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::Kind; +use std::borrow::Borrow; use std::str::FromStr; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; #[cfg(feature = "test-aws-infra")] fn create_and_destroy_eks_cluster( region: String, - zones: Vec, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -25,6 +24,7 @@ fn create_and_destroy_eks_cluster( ) { engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let zones = region.get_zones(); cluster_test( test_name, Kind::Aws, @@ -35,11 +35,10 @@ fn create_and_destroy_eks_cluster( logger(), region.to_aws_format().as_str(), Some(zones), - secrets, test_type, major_boot_version, minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, Option::from(vpc_network_mode), None, ) @@ -55,13 +54,9 @@ fn create_and_destroy_eks_cluster( #[named] #[test] fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS 
region was not found"); - let aws_region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -74,13 +69,9 @@ fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() { #[named] #[test] fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -93,13 +84,9 @@ fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() { #[named] #[test] fn create_and_destroy_eks_cluster_in_us_east_2() { - let secrets = FuncTestsSecrets::new(); let region = "us-east-2".to_string(); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -133,14 +120,9 @@ fn create_pause_and_destroy_eks_cluster_in_us_east_2() { #[test] #[ignore] fn create_upgrade_and_destroy_eks_cluster_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); - + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::WithUpgrade, 
AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index ca9a8b90..9de34a04 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -43,11 +43,10 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { logger(), ®ion, Some(aws_zones), - secrets.clone(), ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, - ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), Some(WithNatGateways), Some(&env_action), ) diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 478b1c1b..430e9ecb 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -13,8 +13,9 @@ use test_utilities::utilities::{ use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::{database_test_environment, test_db, working_minimal_environment, Infrastructure}; use test_utilities::digitalocean::{ - clean_environments, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, - DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, DO_TEST_REGION, + clean_environments, do_default_engine_config, DO_KUBERNETES_VERSION, DO_MANAGED_DATABASE_DISK_TYPE, + DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, + DO_TEST_REGION, }; /** @@ -47,7 +48,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -65,10 +68,16 @@ fn 
deploy_an_environment_with_3_databases_and_3_apps() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Do, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -103,7 +112,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -121,10 +132,11 @@ fn deploy_an_environment_with_db_and_pause_it() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action.clone(), logger.clone()); + let ret = + environment.deploy_environment(Kind::Do, &context, &env_action.clone(), logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.pause_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); 
assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -139,7 +151,13 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Do, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -175,7 +193,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -202,7 +222,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -212,8 +232,13 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = - environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_for_deletion, logger); + let ret = environment_delete.delete_environment( + Kind::Do, + &context_for_deletion, + 
&env_action_for_deletion, + logger, + &engine_config_for_deletion, + ); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -254,8 +279,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_redeploy = context.clone_not_same_execution_id(); + let engine_config_for_redeploy = do_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -341,7 +369,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = environment_to_redeploy.deploy_environment( @@ -349,6 +377,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { &context_for_redeploy, &env_action_redeploy, logger.clone(), + &engine_config_for_redeploy, ); assert!(matches!(ret, TransactionResult::Ok)); @@ -365,7 +394,13 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let ret = environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let ret = environment_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!( ret, TransactionResult::Ok | 
TransactionResult::UnrecoverableError(_, _) diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index d2b04f5d..f6d878c7 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -14,7 +14,7 @@ use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::time::SystemTime; use test_utilities::common::Infrastructure; -use test_utilities::digitalocean::container_registry_digital_ocean; +use test_utilities::digitalocean::{container_registry_digital_ocean, do_default_engine_config, DO_KUBERNETES_VERSION}; use test_utilities::utilities::{build_platform_local_docker, context}; use tracing::{span, warn, Level}; @@ -44,7 +44,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -62,11 +64,16 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let ret = environment_for_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, 
TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -98,6 +105,7 @@ fn test_build_cache() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger()); let environment = test_utilities::common::working_minimal_environment( &context, @@ -189,7 +197,9 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -206,11 +216,16 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let ret = environment_for_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -244,7 +259,9 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + 
let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -257,7 +274,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let env_action = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -270,7 +287,13 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment(Kind::Do, &context_for_delete, &env_action, logger.clone()); + let ret = environment.pause_environment( + Kind::Do, + &context_for_delete, + &env_action, + logger.clone(), + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -286,7 +309,14 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let ret = environment.deploy_environment(Kind::Do, &ctx_resume, &env_action, logger.clone()); + let engine_config_resume = do_default_engine_config(&ctx_resume, logger.clone()); + let ret = environment.deploy_environment( + Kind::Do, + &ctx_resume, + &env_action, + logger.clone(), + &engine_config_resume, + ); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -300,7 +330,13 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { 
assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let ret = environment.delete_environment(Kind::Do, &context_for_delete, &env_action, logger); + let ret = environment.delete_environment( + Kind::Do, + &context_for_delete, + &env_action, + logger, + &engine_config_for_delete, + ); assert!(matches!(ret, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -334,7 +370,9 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -369,11 +407,16 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -407,7 +450,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { 
.as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -423,11 +468,16 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let result = environment_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -461,8 +511,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); - + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -495,7 +546,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = 
EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -506,7 +557,13 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Do, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -540,8 +597,11 @@ fn digitalocean_doks_redeploy_same_app() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = do_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -579,7 +639,7 @@ fn digitalocean_doks_redeploy_same_app() { let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); 
assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -599,8 +659,13 @@ fn digitalocean_doks_redeploy_same_app() { secrets.clone(), ); - let result = - environment_redeploy.deploy_environment(Kind::Do, &context_bis, &env_action_redeploy, logger.clone()); + let result = environment_redeploy.deploy_environment( + Kind::Do, + &context_bis, + &env_action_redeploy, + logger.clone(), + &engine_config_bis, + ); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -614,7 +679,13 @@ fn digitalocean_doks_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Do, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -648,8 +719,11 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = do_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -687,12 +761,18 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme &context_for_not_working, &env_action_not_working, 
logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -729,6 +809,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -740,6 +821,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { // not working 1 let context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = do_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -755,10 +837,12 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = do_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = 
do_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; @@ -768,7 +852,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback @@ -777,6 +861,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { &context_for_not_working_1, &env_action_not_working_1, logger.clone(), + &engine_config_for_not_working_1, ); assert!(matches!( result, @@ -789,6 +874,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { &context_for_not_working_2, &env_action_not_working_2, logger.clone(), + &engine_config_for_not_working_2, ); assert!(matches!( result, @@ -796,10 +882,16 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -833,6 +925,7 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = 
do_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -843,16 +936,23 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { ); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -888,7 +988,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -904,14 +1006,19 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); 
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); - let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Do, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index 35102116..5a262127 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -13,7 +13,6 @@ use test_utilities::common::{cluster_test, ClusterTestType}; #[cfg(feature = "test-do-infra")] fn create_and_destroy_doks_cluster( region: DoRegion, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -27,11 +26,10 @@ fn create_and_destroy_doks_cluster( logger(), region.as_str(), None, - secrets, test_type, major_boot_version, minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, None, None, ) @@ -43,10 +41,8 @@ fn create_and_destroy_doks_cluster( #[test] fn create_and_destroy_doks_cluster_ams_3() { let region = DoRegion::Amsterdam3; - let secrets = FuncTestsSecrets::new(); create_and_destroy_doks_cluster( region, - secrets, ClusterTestType::Classic, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, @@ -60,10 +56,8 @@ fn create_and_destroy_doks_cluster_ams_3() { #[ignore] fn 
create_upgrade_and_destroy_doks_cluster_in_nyc_3() { let region = DoRegion::NewYorkCity3; - let secrets = FuncTestsSecrets::new(); create_and_destroy_doks_cluster( region, - secrets, ClusterTestType::WithUpgrade, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, diff --git a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs index 934ef4f8..6f1a7dfd 100644 --- a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs +++ b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs @@ -3,6 +3,7 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; use qovery_engine::cloud_provider::digitalocean::DO; +use test_utilities::digitalocean::{do_default_engine_config, DO_KUBERNETES_VERSION, DO_TEST_REGION}; use tracing::{span, Level}; use self::test_utilities::common::Cluster; @@ -36,7 +37,7 @@ fn create_digitalocean_kubernetes_doks_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = DO::docker_cr_engine(&context, logger.clone()); + let engine = do_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy @@ -78,7 +79,7 @@ fn destroy_digitalocean_kubernetes_doks_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = DO::docker_cr_engine(&context, logger.clone()); + let engine = do_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Destroy diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index f1d3985f..4c14c90a 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ 
b/tests/digitalocean/do_whole_enchilada.rs @@ -39,11 +39,10 @@ fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { logger, region.as_str(), None, - secrets.clone(), ClusterTestType::Classic, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, - ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), None, Some(&env_action), ) diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index d6a5ac36..c362b883 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -15,8 +15,9 @@ use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::{database_test_environment, Infrastructure}; use test_utilities::common::{test_db, working_minimal_environment}; use test_utilities::scaleway::{ - clean_environments, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, - SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, SCW_TEST_ZONE, + clean_environments, scw_default_engine_config, SCW_KUBERNETES_VERSION, SCW_MANAGED_DATABASE_DISK_TYPE, + SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, + SCW_TEST_ZONE, }; /** @@ -51,7 +52,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -69,11 +72,16 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, 
&context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -110,7 +118,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -128,10 +138,10 @@ fn deploy_an_environment_with_db_and_pause_it() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment.pause_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.pause_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -146,8 +156,13 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - 
let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger.clone()); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_deletion, + &env_action_delete, + logger.clone(), + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -185,7 +200,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -211,11 +228,16 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_for_deletion, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_deletion, + &env_action_for_deletion, + logger, + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -258,8 +280,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_redeploy = 
context.clone_not_same_execution_id(); + let engine_config_for_redeploy = scw_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -346,7 +371,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = environment_to_redeploy.deploy_environment( @@ -354,6 +379,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { &context_for_redeploy, &env_action_redeploy, logger.clone(), + &engine_config_for_redeploy, ); assert!(matches!(result, TransactionResult::Ok)); @@ -370,7 +396,13 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let result = environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 870c70f4..6d0be1cb 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -14,7 +14,7 @@ use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::time::SystemTime; use 
test_utilities::common::Infrastructure; -use test_utilities::scaleway::container_registry_scw; +use test_utilities::scaleway::{container_registry_scw, scw_default_engine_config, SCW_KUBERNETES_VERSION}; use test_utilities::utilities::build_platform_local_docker; use tracing::{span, warn, Level}; @@ -46,7 +46,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -64,11 +66,16 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -195,7 +202,9 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + 
let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -213,11 +222,16 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -256,7 +270,9 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -269,7 +285,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let env_action = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); 
assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -282,7 +298,13 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment(Kind::Scw, &context_for_delete, &env_action, logger.clone()); + let result = environment.pause_environment( + Kind::Scw, + &context_for_delete, + &env_action, + logger.clone(), + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -298,7 +320,14 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let result = environment.deploy_environment(Kind::Scw, &ctx_resume, &env_action, logger.clone()); + let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); + let result = environment.deploy_environment( + Kind::Scw, + &ctx_resume, + &env_action, + logger.clone(), + &engine_config_resume, + ); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -312,7 +341,13 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = environment.delete_environment(Kind::Scw, &context_for_delete, &env_action, logger); + let result = environment.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -348,7 +383,9 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + 
let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -383,11 +420,16 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -423,7 +465,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -439,11 +483,16 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = 
environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -479,7 +528,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -513,7 +564,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -524,8 +575,13 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_deletion, + &env_action_delete, + logger, + 
&engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -560,7 +616,9 @@ fn deploy_a_working_environment_and_pause_it() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -573,7 +631,7 @@ fn deploy_a_working_environment_and_pause_it() { let ea = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &ea, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &ea, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -586,7 +644,13 @@ fn deploy_a_working_environment_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment(Kind::Scw, &context_for_delete, &ea, logger.clone()); + let result = environment.pause_environment( + Kind::Scw, + &context_for_delete, + &ea, + logger.clone(), + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -602,7 +666,8 @@ fn deploy_a_working_environment_and_pause_it() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let result = environment.deploy_environment(Kind::Scw, &ctx_resume, &ea, logger.clone()); + let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &ctx_resume, 
&ea, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -616,7 +681,8 @@ fn deploy_a_working_environment_and_pause_it() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = environment.delete_environment(Kind::Scw, &context_for_delete, &ea, logger); + let result = + environment.delete_environment(Kind::Scw, &context_for_delete, &ea, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); return test_name.to_string(); }) @@ -647,8 +713,11 @@ fn scaleway_kapsule_redeploy_same_app() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = scw_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -687,7 +756,7 @@ fn scaleway_kapsule_redeploy_same_app() { let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -707,8 +776,13 @@ fn scaleway_kapsule_redeploy_same_app() { secrets.clone(), ); - let result = - environment_redeploy.deploy_environment(Kind::Scw, &context_bis, &env_action_redeploy, logger.clone()); + let result = environment_redeploy.deploy_environment( + Kind::Scw, + &context_bis, + 
&env_action_redeploy, + logger.clone(), + &engine_config_bis, + ); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -722,8 +796,13 @@ fn scaleway_kapsule_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment( + Kind::Scw, + &context_for_deletion, + &env_action_delete, + logger, + &engine_config_for_deletion, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -759,8 +838,11 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = scw_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -798,14 +880,20 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen &context_for_not_working, &env_action_not_working, logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - 
environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -845,6 +933,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -856,6 +945,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { // not working 1 let context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = scw_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -871,10 +961,12 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = scw_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; @@ -884,7 +976,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = 
environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback @@ -893,6 +985,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { &context_for_not_working_1, &env_action_not_working_1, logger.clone(), + &engine_config_for_not_working_1, ); assert!(matches!( result, @@ -905,6 +998,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { &context_for_not_working_2, &env_action_not_working_2, logger.clone(), + &engine_config_for_not_working_2, ); assert!(matches!( result, @@ -912,10 +1006,16 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -952,6 +1052,7 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -962,16 +1063,23 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { ); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); 
delete_env.action = Action::Delete; let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -1007,7 +1115,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { .expect("SCALEWAY_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -1023,14 +1133,19 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // checking cookie is properly set on the app 
assert!(routers_sessions_are_sticky(environment.routers.clone())); - let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_for_delete.delete_environment( + Kind::Scw, + &context_for_delete, + &env_action_for_delete, + logger, + &engine_config_for_delete, + ); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 35a01b98..58e5c9a0 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -13,7 +13,6 @@ use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; #[cfg(feature = "test-scw-infra")] fn create_and_destroy_kapsule_cluster( zone: ScwZone, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -28,11 +27,10 @@ fn create_and_destroy_kapsule_cluster( logger(), zone.as_str(), None, - secrets, test_type, major_boot_version, minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, vpc_network_mode, None, ) @@ -45,10 +43,8 @@ fn create_and_destroy_kapsule_cluster( #[test] fn create_and_destroy_kapsule_cluster_par_1() { let zone = ScwZone::Paris1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -62,10 +58,8 @@ fn create_and_destroy_kapsule_cluster_par_1() { #[test] fn create_and_destroy_kapsule_cluster_par_2() { let zone = ScwZone::Paris2; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -79,10 +73,8 @@ fn create_and_destroy_kapsule_cluster_par_2() { #[test] fn 
create_pause_and_destroy_kapsule_cluster_ams_1() { let zone = ScwZone::Amsterdam1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithPause, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -96,10 +88,8 @@ fn create_pause_and_destroy_kapsule_cluster_ams_1() { #[test] fn create_and_destroy_kapsule_cluster_war_1() { let zone = ScwZone::Warsaw1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -115,10 +105,8 @@ fn create_and_destroy_kapsule_cluster_war_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_par_1() { let zone = ScwZone::Paris1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -134,10 +122,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_par_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_par_2() { let zone = ScwZone::Paris2; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -153,10 +139,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_par_2() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_ams_1() { let zone = ScwZone::Amsterdam1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -172,10 +156,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_ams_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_war_1() { let zone = ScwZone::Warsaw1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, 
SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, diff --git a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs index 0e1bbb46..19b8753e 100644 --- a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs +++ b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs @@ -2,6 +2,7 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; +use test_utilities::scaleway::{scw_default_engine_config, SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use tracing::{span, Level}; use self::test_utilities::common::Cluster; @@ -36,7 +37,7 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = Scaleway::docker_cr_engine(&context, logger.clone()); + let engine = scw_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy @@ -78,7 +79,7 @@ fn destroy_scaleway_kubernetes_kapsule_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = Scaleway::docker_cr_engine(&context, logger.clone()); + let engine = scw_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Destroy diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index da76f998..641dd2df 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -37,11 +37,10 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { logger, zone.as_str(), None, - secrets.clone(), ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, - 
ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), None, Some(&env_action), ) From 6197172c0abe29e5c0eda89c9e5605380b2990e3 Mon Sep 17 00:00:00 2001 From: enzo Date: Fri, 11 Mar 2022 16:01:05 +0100 Subject: [PATCH 12/85] fix: compil error --- tests/aws/aws_kubernetes.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index e09c5ddf..fbcae223 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -104,8 +104,6 @@ fn create_pause_and_destroy_eks_cluster_in_us_east_2() { let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::WithPause, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, From 490751032a6b02f70d5236ec066a7275872b0a66 Mon Sep 17 00:00:00 2001 From: enzo Date: Fri, 11 Mar 2022 17:15:03 +0100 Subject: [PATCH 13/85] fix: compil error --- test_utilities/src/aws.rs | 1 + test_utilities/src/common.rs | 66 ++++++---- test_utilities/src/digitalocean.rs | 1 + test_utilities/src/scaleway.rs | 1 + tests/aws/aws_databases.rs | 50 ++----- tests/aws/aws_environment.rs | 150 +++++---------------- tests/digitalocean/do_databases.rs | 45 ++----- tests/digitalocean/do_environment.rs | 144 ++++---------------- tests/scaleway/scw_container_registry.rs | 5 + tests/scaleway/scw_databases.rs | 46 ++----- tests/scaleway/scw_environment.rs | 161 +++++------------------ 11 files changed, 173 insertions(+), 497 deletions(-) diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 4ef23bc8..77871e97 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -103,6 +103,7 @@ impl Cluster for AWS { logger.clone(), localisation, kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 
f2fa93e4..6bfb7081 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -70,24 +70,18 @@ pub trait Cluster { pub trait Infrastructure { fn deploy_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn pause_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn delete_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, @@ -97,8 +91,6 @@ pub trait Infrastructure { impl Infrastructure for Environment { fn deploy_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, @@ -117,8 +109,6 @@ impl Infrastructure for Environment { fn pause_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, @@ -131,8 +121,6 @@ impl Infrastructure for Environment { fn delete_environment( &self, - provider_kind: Kind, - context: &Context, environment_action: &EnvironmentAction, logger: Box, engine_config: &EngineConfig, @@ -1087,7 +1075,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - kubernetes_version, + kubernetes_version.clone(), &ClusterDomain::Default, None, ), @@ -1095,7 +1083,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - kubernetes_version, + kubernetes_version.clone(), &ClusterDomain::Default, None, ), @@ -1103,13 +1091,13 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - kubernetes_version, + kubernetes_version.clone(), &ClusterDomain::Default, None, ), }; - let ret = environment.deploy_environment(provider_kind.clone(), &context, &ea, logger.clone(), &engine_config); + let ret = 
environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match database_mode.clone() { @@ -1172,13 +1160,34 @@ pub fn test_db( } } - let ret = environment_delete.delete_environment( - provider_kind.clone(), - &context_for_delete, - &ea_delete, - logger, - &engine_config, - ); + let engine_config_for_delete = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1192,6 +1201,7 @@ pub fn get_environment_test_kubernetes<'a>( logger: Box, localisation: &str, kubernetes_version: &str, + vpc_network_mode: Option, ) -> Box { let secrets = FuncTestsSecrets::new(); let k: Box; @@ -1199,6 +1209,10 @@ pub fn get_environment_test_kubernetes<'a>( match provider_kind { Kind::Aws => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); + let mut options = AWS::kubernetes_cluster_options(secrets, None); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } k = Box::new( EKS::new( context.clone(), @@ -1210,7 +1224,7 @@ pub fn get_environment_test_kubernetes<'a>( region.get_zones_to_string(), cloud_provider, dns_provider, - AWS::kubernetes_cluster_options(secrets.clone(), None), + options, AWS::kubernetes_nodes(), logger, ) @@ -1280,7 +1294,9 @@ pub fn get_cluster_test_kubernetes<'a>( 
Kind::Aws => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); - options.vpc_qovery_network_mode = vpc_network_mode.unwrap(); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); k = Box::new( EKS::new( diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 48bc9b86..31d653d6 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -80,6 +80,7 @@ impl Cluster for DO { logger.clone(), localisation, kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index e034f26d..ce61d38e 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -100,6 +100,7 @@ impl Cluster for Scaleway { logger.clone(), localisation, kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 2a408b15..43f00166 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -68,16 +68,10 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); 
assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -129,10 +123,10 @@ fn deploy_an_environment_with_db_and_pause_it() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.pause_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -147,13 +141,7 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -215,7 +203,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -225,13 +213,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = environment_delete.delete_environment( - 
Kind::Aws, - &context_for_deletion, - &ea_for_deletion, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -343,16 +325,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_to_redeploy.deploy_environment( - Kind::Aws, - &context_for_redeploy, - &ea_redeploy, - logger.clone(), - &engine_config_for_redeploy, - ); + let ret = environment_to_redeploy.deploy_environment(&ea_redeploy, logger.clone(), &engine_config_for_redeploy); assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY @@ -368,13 +344,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 50fa14bc..cc767323 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -65,16 +65,10 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = 
environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -213,7 +207,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.clone().applications[0].id); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -226,13 +220,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment( - Kind::Aws, - &context_for_delete, - &ea, - logger.clone(), - &engine_config_for_delete, - ); + let ret = environment.pause_environment(&ea, logger.clone(), &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -284,7 +272,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); let engine_config_resume = aws_default_engine_config(&ctx_resume, logger.clone()); - let ret = environment.deploy_environment(Kind::Aws, &ctx_resume, &ea, logger.clone(), &engine_config_resume); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); let ret = 
get_pods( @@ -337,8 +325,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert!(filtered_pdb); // Cleanup - let ret = - environment.delete_environment(Kind::Aws, &context_for_delete, &ea, logger, &engine_config_for_delete); + let ret = environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -388,16 +375,10 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -468,16 +449,10 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -545,16 
+520,10 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -603,16 +572,10 @@ fn deploy_a_working_environment_with_domain() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -680,7 +643,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match 
get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { @@ -691,13 +654,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { Err(_) => assert!(false), }; - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -771,7 +728,7 @@ fn redeploy_same_app_with_ebs() { let ea2 = EnvironmentAction::Environment(environment_redeploy.clone()); let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { @@ -791,8 +748,7 @@ fn redeploy_same_app_with_ebs() { secrets.clone(), ); - let ret = - environment_redeploy.deploy_environment(Kind::Aws, &context_bis, &ea2, logger.clone(), &engine_config_bis); + let ret = environment_redeploy.deploy_environment(&ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -804,13 +760,7 @@ fn redeploy_same_app_with_ebs() { ); //nothing change in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let ret = environment_delete.delete_environment( - Kind::Aws, - &context_for_deletion, - &ea_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -878,24 +828,16 @@ fn deploy_a_not_working_environment_and_after_working_environment() { let ea_delete = 
EnvironmentAction::Environment(environment_for_delete.clone()); let ret = environment_for_not_working.deploy_environment( - Kind::Aws, - &context_for_not_working, &ea_not_working, logger.clone(), &engine_config_for_not_working, ); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -973,46 +915,30 @@ fn deploy_ok_fail_fail_ok_environment() { let ea_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // FAIL and rollback - let ret = not_working_env_1.deploy_environment( - Kind::Aws, - &context_for_not_working_1, - &ea_not_working_1, - logger.clone(), - &engine_config_for_not_working_1, - ); + let ret = + not_working_env_1.deploy_environment(&ea_not_working_1, logger.clone(), &engine_config_for_not_working_1); assert!(matches!( ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) )); // FAIL and Rollback again - let ret = not_working_env_2.deploy_environment( - Kind::Aws, - &context_for_not_working_2, - &ea_not_working_2, - logger.clone(), - &engine_config_for_not_working_2, - ); + let ret = + not_working_env_2.deploy_environment(&ea_not_working_2, logger.clone(), &engine_config_for_not_working_2); assert!(matches!( ret, 
TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) )); // Should be working - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = delete_env.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1061,16 +987,10 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { let ea = EnvironmentAction::Environment(environment.clone()); let ea_delete = EnvironmentAction::Environment(delete_env.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = delete_env.delete_environment( - Kind::Aws, - &context_for_delete, - &ea_delete, - logger, - &engine_config_for_delete, - ); + let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1120,19 +1040,13 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Aws, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // checking if cookie is properly set on the app 
assert!(routers_sessions_are_sticky(environment.routers.clone())); - let ret = environment_for_delete.delete_environment( - Kind::Aws, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); test_name.to_string() diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 430e9ecb..873cee85 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -68,16 +68,10 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment( - Kind::Do, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -132,11 +126,10 @@ fn deploy_an_environment_with_db_and_pause_it() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = - environment.deploy_environment(Kind::Do, &context, &env_action.clone(), logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action.clone(), logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Do, 
&context, &env_action, logger.clone(), &engine_config); + let ret = environment.pause_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -151,13 +144,7 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment( - Kind::Do, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -222,7 +209,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -232,13 +219,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = environment_delete.delete_environment( - Kind::Do, - &context_for_deletion, - &env_action_for_deletion, - logger, - &engine_config_for_deletion, - ); + let ret = environment_delete.delete_environment(&env_action_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -369,12 +350,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let env_action = EnvironmentAction::Environment(environment.clone()); let 
env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = environment_to_redeploy.deploy_environment( - Kind::Do, - &context_for_redeploy, &env_action_redeploy, logger.clone(), &engine_config_for_redeploy, @@ -394,13 +373,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let ret = environment_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index f6d878c7..5f0e3c71 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -64,16 +64,10 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); if let Err(e) = 
clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -216,16 +210,10 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment_for_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -274,7 +262,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let env_action = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -287,13 +275,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment( - Kind::Do, - &context_for_delete, - &env_action, - logger.clone(), - &engine_config_for_delete, - ); + let ret = environment.pause_environment(&env_action, logger.clone(), &engine_config_for_delete); assert!(matches!(ret, 
TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -310,13 +292,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); let engine_config_resume = do_default_engine_config(&ctx_resume, logger.clone()); - let ret = environment.deploy_environment( - Kind::Do, - &ctx_resume, - &env_action, - logger.clone(), - &engine_config_resume, - ); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -330,13 +306,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let ret = environment.delete_environment( - Kind::Do, - &context_for_delete, - &env_action, - logger, - &engine_config_for_delete, - ); + let ret = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -407,16 +377,11 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_for_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, 
TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -468,16 +433,10 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -546,7 +505,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -557,13 +516,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = environment_delete.delete_environment( - Kind::Do, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, 
&engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -639,7 +592,7 @@ fn digitalocean_doks_redeploy_same_app() { let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -659,13 +612,7 @@ fn digitalocean_doks_redeploy_same_app() { secrets.clone(), ); - let result = environment_redeploy.deploy_environment( - Kind::Do, - &context_bis, - &env_action_redeploy, - logger.clone(), - &engine_config_bis, - ); + let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -679,13 +626,7 @@ fn digitalocean_doks_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = environment_delete.delete_environment( - Kind::Do, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -757,22 +698,14 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); let result = 
environment_for_not_working.deploy_environment( - Kind::Do, - &context_for_not_working, &env_action_not_working, logger.clone(), &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_for_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -852,13 +785,11 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback let result = not_working_env_1.deploy_environment( - Kind::Do, - &context_for_not_working_1, &env_action_not_working_1, logger.clone(), &engine_config_for_not_working_1, @@ -870,8 +801,6 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { // FAIL and Rollback again let result = not_working_env_2.deploy_environment( - Kind::Do, - &context_for_not_working_2, &env_action_not_working_2, logger.clone(), &engine_config_for_not_working_2, @@ -882,16 +811,10 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Do, &context, &env_action, 
logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -943,16 +866,10 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { @@ -1006,19 +923,14 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, 
TransactionResult::Ok)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); - let result = environment_for_delete.delete_environment( - Kind::Do, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index a0c80992..fce2203f 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -4,6 +4,7 @@ use self::test_utilities::utilities::{context, FuncTestsSecrets}; use qovery_engine::build_platform::Image; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; +use test_utilities::utilities::logger; use tracing::debug; use uuid::Uuid; @@ -45,6 +46,7 @@ fn test_get_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + logger(), ); let image = Image { @@ -103,6 +105,7 @@ fn test_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + logger(), ); let image = Image { @@ -154,6 +157,7 @@ fn test_delete_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + logger(), ); let image = Image { @@ -200,6 +204,7 @@ fn test_get_or_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + logger(), ); let image = Image { diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index c362b883..acb7c597 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -72,16 +72,10 @@ fn 
deploy_an_environment_with_3_databases_and_3_apps() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -138,10 +132,10 @@ fn deploy_an_environment_with_db_and_pause_it() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment.pause_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.pause_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -156,13 +150,8 @@ fn deploy_an_environment_with_db_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_deletion, - &env_action_delete, - logger.clone(), - &engine_config_for_deletion, - ); + let result = + 
environment_delete.delete_environment(&env_action_delete, logger.clone(), &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -228,16 +217,11 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_deletion, - &env_action_for_deletion, - logger, - &engine_config_for_deletion, - ); + let result = + environment_delete.delete_environment(&env_action_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -371,12 +355,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = environment_to_redeploy.deploy_environment( - Kind::Scw, - &context_for_redeploy, &env_action_redeploy, logger.clone(), &engine_config_for_redeploy, @@ -396,13 +378,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_delete, - logger, - 
&engine_config_for_delete, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 6d0be1cb..7b329967 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -66,16 +66,11 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_for_delete.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -222,16 +217,11 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment_for_delete.delete_environment( - Kind::Scw, - 
&context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -285,7 +275,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let env_action = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -298,13 +288,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment( - Kind::Scw, - &context_for_delete, - &env_action, - logger.clone(), - &engine_config_for_delete, - ); + let result = environment.pause_environment(&env_action, logger.clone(), &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -321,13 +305,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); - let result = environment.deploy_environment( - Kind::Scw, - &ctx_resume, - &env_action, - logger.clone(), - &engine_config_resume, - ); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -341,13 +319,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { 
assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = environment.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action, - logger, - &engine_config_for_delete, - ); + let result = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -420,16 +392,11 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_for_delete.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -483,16 +450,10 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_delete.delete_environment( - Kind::Scw, - 
&context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -564,7 +525,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -575,13 +536,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -631,7 +586,7 @@ fn deploy_a_working_environment_and_pause_it() { let ea = EnvironmentAction::Environment(environment.clone()); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &ea, logger.clone(), &engine_config); + let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -644,13 +599,7 @@ fn 
deploy_a_working_environment_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment( - Kind::Scw, - &context_for_delete, - &ea, - logger.clone(), - &engine_config_for_delete, - ); + let result = environment.pause_environment(&ea, logger.clone(), &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -667,7 +616,7 @@ fn deploy_a_working_environment_and_pause_it() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); - let result = environment.deploy_environment(Kind::Scw, &ctx_resume, &ea, logger.clone(), &engine_config_resume); + let result = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -681,8 +630,7 @@ fn deploy_a_working_environment_and_pause_it() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = - environment.delete_environment(Kind::Scw, &context_for_delete, &ea, logger, &engine_config_for_delete); + let result = environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); return test_name.to_string(); }) @@ -756,7 +704,7 @@ fn scaleway_kapsule_redeploy_same_app() { let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -776,13 +724,7 @@ 
fn scaleway_kapsule_redeploy_same_app() { secrets.clone(), ); - let result = environment_redeploy.deploy_environment( - Kind::Scw, - &context_bis, - &env_action_redeploy, - logger.clone(), - &engine_config_bis, - ); + let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -796,13 +738,7 @@ fn scaleway_kapsule_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = environment_delete.delete_environment( - Kind::Scw, - &context_for_deletion, - &env_action_delete, - logger, - &engine_config_for_deletion, - ); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -876,24 +812,16 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); let result = environment_for_not_working.deploy_environment( - Kind::Scw, - &context_for_not_working, &env_action_not_working, logger.clone(), &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment_for_delete.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, 
TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -976,13 +904,11 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); // OK - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback let result = not_working_env_1.deploy_environment( - Kind::Scw, - &context_for_not_working_1, &env_action_not_working_1, logger.clone(), &engine_config_for_not_working_1, @@ -994,8 +920,6 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { // FAIL and Rollback again let result = not_working_env_2.deploy_environment( - Kind::Scw, - &context_for_not_working_2, &env_action_not_working_2, logger.clone(), &engine_config_for_not_working_2, @@ -1006,16 +930,10 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -1070,16 +988,10 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_delete = 
EnvironmentAction::Environment(delete_env.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_delete, - logger, - &engine_config_for_delete, - ); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { @@ -1133,19 +1045,14 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { let env_action = EnvironmentAction::Environment(environment.clone()); let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone(), &engine_config); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); - let result = environment_for_delete.delete_environment( - Kind::Scw, - &context_for_delete, - &env_action_for_delete, - logger, - &engine_config_for_delete, - ); + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { From e0a1dff4b23802b31a785c3ba53cbaef6fe891bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 17 Mar 2022 10:44:56 +0100 Subject: [PATCH 14/85] build_refacto 
(#643) * build_refacto * build_refacto --- Cargo.lock | 8 + Cargo.toml | 1 + src/build_platform/local_docker.rs | 243 ++----- src/build_platform/mod.rs | 114 +-- src/cloud_provider/aws/application.rs | 37 +- .../digitalocean/application.rs | 37 +- src/cloud_provider/scaleway/application.rs | 38 +- src/cmd/docker.rs | 682 ++++++++++++++++++ src/cmd/mod.rs | 1 + src/container_registry/docker.rs | 462 ------------ src/container_registry/docker_hub.rs | 320 -------- src/container_registry/docr.rs | 344 +-------- src/container_registry/ecr.rs | 326 +-------- src/container_registry/mod.rs | 41 +- .../scaleway_container_registry.rs | 399 ++-------- src/errors/io.rs | 2 + src/errors/mod.rs | 21 + src/git.rs | 2 +- src/models.rs | 81 +-- src/transaction.rs | 240 +----- test_utilities/Cargo.lock | 8 + test_utilities/Cargo.toml | 1 + test_utilities/src/aws.rs | 13 - test_utilities/src/common.rs | 2 +- test_utilities/src/digitalocean.rs | 24 +- test_utilities/src/scaleway.rs | 9 +- test_utilities/src/utilities.rs | 3 +- tests/aws/aws_databases.rs | 7 +- tests/aws/aws_environment.rs | 101 +-- tests/aws/aws_kubernetes.rs | 3 +- tests/digitalocean/do_databases.rs | 11 +- tests/digitalocean/do_environment.rs | 99 +-- tests/digitalocean/do_kubernetes.rs | 4 +- ...do_utility_kubernetes_doks_test_cluster.rs | 4 +- .../multi_stage_simple/Dockerfile.buildkit | 10 + tests/docker/multi_stage_simple/hello.go | 7 + tests/scaleway/scw_container_registry.rs | 48 +- tests/scaleway/scw_databases.rs | 9 +- tests/scaleway/scw_environment.rs | 99 +-- tests/scaleway/scw_kubernetes.rs | 4 +- ...utility_kubernetes_kapsule_test_cluster.rs | 4 +- 41 files changed, 1113 insertions(+), 2756 deletions(-) create mode 100644 src/cmd/docker.rs delete mode 100644 src/container_registry/docker.rs delete mode 100644 src/container_registry/docker_hub.rs create mode 100644 tests/docker/multi_stage_simple/Dockerfile.buildkit create mode 100644 tests/docker/multi_stage_simple/hello.go diff --git a/Cargo.lock 
b/Cargo.lock index 7cae05a1..00a3bf2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2119,6 +2119,7 @@ dependencies = [ "tracing-test", "trust-dns-resolver", "url 2.2.2", + "urlencoding", "uuid 0.8.2", "walkdir", ] @@ -3283,6 +3284,7 @@ dependencies = [ "time 0.2.27", "tracing", "tracing-subscriber", + "url 2.2.2", "uuid 0.8.2", ] @@ -3939,6 +3941,12 @@ dependencies = [ "url 1.7.2", ] +[[package]] +name = "urlencoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" + [[package]] name = "uuid" version = "0.7.4" diff --git a/Cargo.toml b/Cargo.toml index 2154f995..abfed16b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ function_name = "0.2.0" thiserror = "1.0.30" strum = "0.23" strum_macros = "0.23" +urlencoding = "2.1.0" # FIXME use https://crates.io/crates/blocking instead of runtime.rs diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 591f58ac..0079c5d0 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,10 +6,11 @@ use chrono::Duration; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; -use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, CacheResult, Credentials, Image, Kind}; +use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::QoveryCommand; +use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; @@ -32,6 +33,7 @@ const BUILDPACKS_BUILDERS: [&str; 1] = [ /// use Docker in local pub struct LocalDocker { context: Context, + docker: Docker, id: String, name: String, listeners: Listeners, @@ -39,31 
+41,25 @@ pub struct LocalDocker { } impl LocalDocker { - pub fn new(context: Context, id: &str, name: &str, logger: Box) -> Self { - LocalDocker { + pub fn new( + context: Context, + id: &str, + name: &str, + logger: Box, + ) -> Result> { + let docker = Docker::new_with_options(true, context.docker_tcp_socket().clone())?; + Ok(LocalDocker { context, + docker, id: id.to_string(), name: name.to_string(), listeners: vec![], logger, - } - } - - fn image_does_exist(&self, image: &Image) -> Result { - let mut cmd = QoveryCommand::new( - "docker", - &vec!["image", "inspect", image.name_with_tag().as_str()], - &self.get_docker_host_envs(), - ); - - Ok(matches!(cmd.exec(), Ok(_))) + }) } fn get_docker_host_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - } + vec![] } /// Read Dockerfile content from location path and return an array of bytes @@ -89,34 +85,20 @@ impl LocalDocker { dockerfile_complete_path: &str, into_dir_docker_style: &str, env_var_args: Vec, - use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, ) -> Result { - let mut docker_args = if !use_build_cache { - vec!["build", "--no-cache"] - } else { - vec!["build"] + let image_to_build = ContainerImage { + registry: build.image.registry_url.clone(), + name: build.image.name(), + tags: vec![build.image.tag.clone(), "latest".to_string()], }; - let args = self.context.docker_build_options(); - for v in args.iter() { - for s in v.iter() { - docker_args.push(String::as_str(s)); - } - } - - let name_with_tag = build.image.name_with_tag(); - let name_with_latest_tag = build.image.name_with_latest_tag(); - - docker_args.extend(vec![ - "-f", - dockerfile_complete_path, - "-t", - name_with_tag.as_str(), - "-t", - name_with_latest_tag.as_str(), - ]); + let image_cache = ContainerImage { + registry: build.image.registry_url.clone(), + name: build.image.name(), + tags: 
vec!["latest".to_string()], + }; let dockerfile_content = self.get_dockerfile_content(dockerfile_complete_path)?; let env_var_args = match docker::match_used_env_var_args(env_var_args, dockerfile_content) { @@ -133,27 +115,25 @@ impl LocalDocker { } }; - let mut docker_args = if env_var_args.is_empty() { - docker_args - } else { - let mut build_args = vec![]; + // FIXME: pass a Vec<(key, value)> instead of spliting always the string + let env_vars = env_var_args + .into_iter() + .map(|val| { + let (key, value) = val.rsplit_once('=').unwrap(); + (key.to_string(), value.to_string()) + }) + .collect::>(); - env_var_args.iter().for_each(|arg_value| { - build_args.push("--build-arg"); - build_args.push(arg_value.as_str()); - }); - - docker_args.extend(build_args); - docker_args - }; - - docker_args.push(into_dir_docker_style); - - // docker build - let mut cmd = QoveryCommand::new("docker", &docker_args, &self.get_docker_host_envs()); - - let exit_status = cmd.exec_with_abort( - Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), + let exit_status = self.docker.build( + &Path::new(dockerfile_complete_path), + &Path::new(into_dir_docker_style), + &image_to_build, + &env_vars + .iter() + .map(|(k, v)| (k.as_str(), v.as_str())) + .collect::>(), + &image_cache, + true, |line| { self.logger.log( LogLevel::Info, @@ -171,25 +151,26 @@ impl LocalDocker { }, |line| { self.logger.log( - LogLevel::Warning, - EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), + LogLevel::Info, + EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), ); lh.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: build.image.application_id.clone(), }, - ProgressLevel::Warn, + ProgressLevel::Info, Some(line), self.context.execution_id(), )); }, + Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), is_task_canceled, ); match exit_status { Ok(_) => Ok(BuildResult { build }), - Err(Killed(_)) => 
Err(EngineError::new_task_cancellation_requested(self.get_event_details())), + Err(DockerError::Aborted(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), Err(err) => Err(EngineError::new_docker_cannot_build_container_image( self.get_event_details(), self.name_with_id(), @@ -207,10 +188,8 @@ impl LocalDocker { lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, ) -> Result { - let name_with_tag = build.image.name_with_tag(); - let name_with_latest_tag = build.image.name_with_latest_tag(); - - let args = self.context.docker_build_options(); + let name_with_tag = build.image.full_image_name_with_tag(); + let name_with_latest_tag = format!("{}:latest", build.image.full_image_name()); let mut exit_status: Result<(), command::CommandError> = Err(command::CommandError::ExecutionError( Error::new(ErrorKind::InvalidData, "No builder names".to_string()), @@ -218,20 +197,13 @@ impl LocalDocker { for builder_name in BUILDPACKS_BUILDERS.iter() { let mut buildpacks_args = if !use_build_cache { - vec!["build", name_with_tag.as_str(), "--clear-cache"] + vec!["build", "--publish", name_with_tag.as_str(), "--clear-cache"] } else { - vec!["build", name_with_tag.as_str()] + vec!["build", "--publish", name_with_tag.as_str()] }; // always add 'latest' tag buildpacks_args.extend(vec!["-t", name_with_latest_tag.as_str()]); - - for v in args.iter() { - for s in v.iter() { - buildpacks_args.push(String::as_str(s)); - } - } - buildpacks_args.extend(vec!["--path", into_dir_docker_style]); let mut buildpacks_args = if env_var_args.is_empty() { @@ -414,69 +386,7 @@ impl BuildPlatform for LocalDocker { Ok(()) } - fn has_cache(&self, build: &Build) -> Result { - let event_details = self.get_event_details(); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("LocalDocker.has_cache() called".to_string()), - ), - ); - - // Check if a local cache layers for the container image exists. 
- let repository_root_path = self.get_repository_build_root_path(&build)?; - - let parent_build = build.to_previous_build(repository_root_path).map_err(|err| { - EngineError::new_builder_get_build_error(self.get_event_details(), build.image.commit_id.to_string(), err) - })?; - - let parent_build = match parent_build { - Some(parent_build) => parent_build, - None => return Ok(CacheResult::MissWithoutParentBuild), - }; - - // check if local layers exist - let cmd_bin = "docker"; - let image_name = parent_build.image.name.clone(); - let cmd_args = vec!["images", "-q", &image_name]; - let mut cmd = QoveryCommand::new(cmd_bin, &cmd_args.clone(), &[]); - - let mut result = CacheResult::Miss(parent_build); - let _ = cmd.exec_with_timeout( - Duration::minutes(1), // `docker images` command can be slow with tons of images - it's probably not indexed - |_| result = CacheResult::Hit, // if a line is returned, then the image is locally present - |r_err| { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_docker_cannot_list_images( - event_details.clone(), - CommandError::new_from_command_line( - "Cannot list docker images".to_string(), - cmd_bin.to_string(), - cmd_args.clone().into_iter().map(|v| v.to_string()).collect(), - vec![], - None, - Some(r_err.to_string()), - ), - ), - None, - ), - ) - }, - ); - - Ok(result) - } - - fn build( - &self, - build: Build, - force_build: bool, - is_task_canceled: &dyn Fn() -> bool, - ) -> Result { + fn build(&self, build: Build, is_task_canceled: &dyn Fn() -> bool) -> Result { let event_details = self.get_event_details(); self.logger.log( @@ -492,22 +402,6 @@ impl BuildPlatform for LocalDocker { } let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_build && self.image_does_exist(&build.image)? 
{ - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Image `{}` found on repository, container build is not required", - build.image.name_with_tag() - )), - ), - ); - - return Ok(BuildResult { build }); - } - let repository_root_path = self.get_repository_build_root_path(&build)?; self.logger.log( @@ -551,6 +445,7 @@ impl BuildPlatform for LocalDocker { if is_task_canceled() { return Err(EngineError::new_task_cancellation_requested(event_details.clone())); } + if let Err(clone_error) = git::clone_at_commit( &build.git_repository.url, &build.git_repository.commit_id, @@ -669,7 +564,6 @@ impl BuildPlatform for LocalDocker { dockerfile_absolute_path.as_str(), build_context_path.as_str(), env_var_args, - !disable_build_cache, &listeners_helper, is_task_canceled, ) @@ -714,38 +608,6 @@ impl BuildPlatform for LocalDocker { result } - fn build_error(&self, build: Build) -> Result { - let event_details = self.get_event_details(); - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!("LocalDocker.build_error() called for {}", self.name())), - ), - ); - - let listener_helper = ListenersHelper::new(&self.listeners); - - // FIXME - let message = String::from("something goes wrong (not implemented)"); - - listener_helper.error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id, - }, - ProgressLevel::Error, - Some(message.as_str()), - self.context.execution_id(), - )); - - let err = EngineError::new_not_implemented_error(event_details); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - // FIXME - Err(err) - } - fn logger(&self) -> Box { self.logger.clone() } @@ -814,6 +676,7 @@ fn docker_prune_images(envs: Vec<(&str, &str)>) -> Result<(), CommandError> { vec!["image", "prune", "-a", "-f"], vec!["builder", "prune", "-a", "-f"], vec!["volume", "prune", "-f"], + 
vec!["buildx", "prune", "-a", "-f"], ]; let mut errored_commands = vec![]; diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 5480fb37..0b9ef095 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -1,15 +1,11 @@ use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use crate::errors::{CommandError, EngineError}; +use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; -use crate::git; use crate::logger::Logger; use crate::models::{Context, Listen, QoveryIdentifier}; -use crate::utilities::get_image_tag; -use git2::{Cred, CredentialType}; use std::fmt::{Display, Formatter, Result as FmtResult}; -use std::path::Path; +use url::Url; pub mod docker; pub mod local_docker; @@ -23,14 +19,7 @@ pub trait BuildPlatform: ToTransmitter + Listen { format!("{} ({})", self.name(), self.id()) } fn is_valid(&self) -> Result<(), EngineError>; - fn has_cache(&self, build: &Build) -> Result; - fn build( - &self, - build: Build, - force_build: bool, - is_task_canceled: &dyn Fn() -> bool, - ) -> Result; - fn build_error(&self, build: Build) -> Result; + fn build(&self, build: Build, is_task_canceled: &dyn Fn() -> bool) -> Result; fn logger(&self) -> Box; fn get_event_details(&self) -> EventDetails { let context = self.context(); @@ -52,63 +41,6 @@ pub struct Build { pub options: BuildOptions, } -impl Build { - pub fn to_previous_build

(&self, clone_repo_into_dir: P) -> Result, CommandError> - where - P: AsRef, - { - let parent_commit_id = git::get_parent_commit_id( - self.git_repository.url.as_str(), - self.git_repository.commit_id.as_str(), - clone_repo_into_dir, - &|_| match &self.git_repository.credentials { - None => vec![], - Some(creds) => vec![( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(creds.login.as_str(), creds.password.as_str()).unwrap(), - )], - }, - ) - .map_err(|err| CommandError::new(err.to_string(), Some("Cannot get parent commit ID.".to_string())))?; - - let parent_commit_id = match parent_commit_id { - None => return Ok(None), - Some(parent_commit_id) => parent_commit_id, - }; - - let mut environment_variables_map = BTreeMap::::new(); - for env in &self.options.environment_variables { - environment_variables_map.insert(env.key.clone(), env.value.clone()); - } - - let mut image = self.image.clone(); - image.tag = get_image_tag( - &self.git_repository.root_path, - &self.git_repository.dockerfile_path, - &environment_variables_map, - &parent_commit_id, - ); - - image.commit_id = parent_commit_id.clone(); - - Ok(Some(Build { - git_repository: GitRepository { - url: self.git_repository.url.clone(), - credentials: self.git_repository.credentials.clone(), - ssh_keys: self.git_repository.ssh_keys.clone(), - commit_id: parent_commit_id, - dockerfile_path: self.git_repository.dockerfile_path.clone(), - root_path: self.git_repository.root_path.clone(), - buildpack_language: self.git_repository.buildpack_language.clone(), - }, - image, - options: BuildOptions { - environment_variables: self.options.environment_variables.clone(), - }, - })) - } -} - pub struct BuildOptions { pub environment_variables: Vec, } @@ -149,22 +81,33 @@ pub struct Image { pub tag: String, pub commit_id: String, // registry name where the image has been pushed: Optional - pub registry_name: Option, + pub registry_name: String, // registry docker json config: Optional pub 
registry_docker_json_config: Option, - // registry secret to pull image: Optional - pub registry_secret: Option, // complete registry URL where the image has been pushed - pub registry_url: Option, + pub registry_url: Url, } impl Image { - pub fn name_with_tag(&self) -> String { - format!("{}:{}", self.name, self.tag) + pub fn registry_host(&self) -> &str { + self.registry_url.host_str().unwrap() } - pub fn name_with_latest_tag(&self) -> String { - format!("{}:latest", self.name) + pub fn full_image_name_with_tag(&self) -> String { + format!( + "{}/{}:{}", + self.registry_url.host_str().unwrap_or_default(), + self.name, + self.tag + ) + } + + pub fn full_image_name(&self) -> String { + format!("{}/{}", self.registry_url.host_str().unwrap_or_default(), self.name,) + } + + pub fn name(&self) -> String { + self.name.clone() } } @@ -175,10 +118,9 @@ impl Default for Image { name: "".to_string(), tag: "".to_string(), commit_id: "".to_string(), - registry_name: None, + registry_name: "".to_string(), registry_docker_json_config: None, - registry_secret: None, - registry_url: None, + registry_url: Url::parse("https://default.com").unwrap(), } } } @@ -208,11 +150,3 @@ impl BuildResult { pub enum Kind { LocalDocker, } - -type ParentBuild = Build; - -pub enum CacheResult { - MissWithoutParentBuild, - Miss(ParentBuild), - Hit, -} diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs index 7768f164..cc11139a 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/cloud_provider/aws/application.rs @@ -15,8 +15,8 @@ use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::EngineError; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; +use crate::logger::Logger; use 
crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; use ::function_name::named; @@ -201,26 +201,7 @@ impl Service for Application { let commit_id = self.image().commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - - match &self.image().registry_url { - Some(registry_url) => context.insert("image_name_with_tag", registry_url.as_str()), - None => { - let image_name_with_tag = self.image().name_with_tag(); - - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } + context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); let environment_variables = self .environment_variables @@ -233,16 +214,8 @@ impl Service for Application { context.insert("environment_variables", &environment_variables); context.insert("ports", &self.ports); - - match self.image.registry_name.as_ref() { - Some(registry_name) => { - context.insert("is_registry_secret", &true); - context.insert("registry_secret", registry_name); - } - None => { - context.insert("is_registry_secret", &false); - } - }; + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.image.registry_host()); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index b7dd0d2b..bcb3454b 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -15,8 +15,8 @@ use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use 
crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; +use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; use ::function_name::named; use std::fmt; @@ -205,26 +205,7 @@ impl Service for Application { let commit_id = self.image.commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - - match &self.image.registry_url { - Some(registry_url) => context.insert("image_name_with_tag", registry_url.as_str()), - None => { - let image_name_with_tag = self.image.name_with_tag(); - - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } + context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -258,16 +239,8 @@ impl Service for Application { context.insert("environment_variables", &environment_variables); context.insert("ports", &self.ports); - - if self.image.registry_name.is_some() { - context.insert("is_registry_secret", &true); - context.insert( - "registry_secret", - &"do-container-registry-secret-for-cluster".to_string(), - ); - } else { - context.insert("is_registry_secret", &false); - }; + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.image.registry_host()); let storage = self .storage diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index 7405d0bf..0c9c2abe 100644 --- 
a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -18,8 +18,8 @@ use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; +use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; use ::function_name::named; @@ -206,27 +206,7 @@ impl Service for Application { let commit_id = self.image().commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - - match &self.image().registry_url { - Some(registry_url) => context.insert( - "image_name_with_tag", - format!("{}/{}", registry_url.as_str(), self.image().name_with_tag()).as_str(), - ), - None => { - let image_name_with_tag = self.image().name_with_tag(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } + context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); let environment_variables = self .environment_variables @@ -239,16 +219,8 @@ impl Service for Application { context.insert("environment_variables", &environment_variables); context.insert("ports", &self.ports); - - match self.image.registry_name.as_ref() { - Some(_) => { - context.insert("is_registry_secret", &true); - context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); - } - None => { - context.insert("is_registry_secret", &false); - } - }; + 
context.insert("is_registry_secret", &true); + context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs new file mode 100644 index 00000000..5765405b --- /dev/null +++ b/src/cmd/docker.rs @@ -0,0 +1,682 @@ +use crate::cmd::command::{CommandError, QoveryCommand}; +use crate::errors::EngineError; +use crate::events::EventDetails; +use chrono::Duration; +use std::path::Path; +use std::process::ExitStatus; +use url::Url; + +#[derive(thiserror::Error, Debug)] +pub enum DockerError { + #[error("Docker Invalid configuration: {0}")] + InvalidConfig(String), + + #[error("Docker terminated with an unknown error: {0}")] + ExecutionError(#[from] std::io::Error), + + #[error("Docker terminated with a non success exit status code: {0}")] + ExitStatusError(ExitStatus), + + #[error("Docker aborted due to user cancel request: {0}")] + Aborted(String), + + #[error("Docker command terminated due to timeout: {0}")] + Timeout(String), +} + +#[derive(Debug)] +pub struct ContainerImage { + pub registry: Url, + pub name: String, + pub tags: Vec, +} + +impl ContainerImage { + pub fn image_names(&self) -> Vec { + let host = if let Some(port) = self.registry.port() { + format!("{}:{}", self.registry.host_str().unwrap_or_default(), port) + } else { + self.registry.host_str().unwrap_or_default().to_string() + }; + + self.tags + .iter() + .map(|tag| format!("{}/{}:{}", host, &self.name, tag)) + .collect() + } + + pub fn image_name(&self) -> String { + self.image_names().remove(0) + } +} + +pub struct Docker { + use_buildkit: bool, + common_envs: Vec<(String, String)>, +} + +impl Docker { + pub fn new_with_options(enable_buildkit: bool, socket_location: Option) -> Result { + let mut docker = Docker { + use_buildkit: enable_buildkit, + common_envs: vec![( + "DOCKER_BUILDKIT".to_string(), + if enable_buildkit { + 
"1".to_string() + } else { + "0".to_string() + }, + )], + }; + + // Override DOCKER_HOST if we use a TCP socket + if let Some(socket_location) = socket_location { + docker + .common_envs + .push(("DOCKER_HOST".to_string(), socket_location.to_string())) + } + + // If we don't use buildkit nothing more to do + if !docker.use_buildkit { + return Ok(docker); + } + + // First check that the buildx plugin is correctly installed + let args = vec!["buildx", "version"]; + let buildx_cmd_exist = docker_exec( + &args, + &docker.get_all_envs(&vec![]), + Some(Duration::max_value()), + &|| false, + |_| {}, + |_| {}, + ); + if let Err(_) = buildx_cmd_exist { + return Err(DockerError::InvalidConfig(format!( + "Docker buildx plugin for buildkit is not correctly installed" + ))); + } + + // In order to be able to use --cache-from --cache-to for buildkit, + // we need to create our specific builder, which is not the default one (aka: the docker one) + let args = vec![ + "buildx", + "create", + "--name", + "qovery-engine", + "--driver-opt", + "network=host", + "--use", + ]; + let _ = docker_exec( + &args, + &docker.get_all_envs(&vec![]), + Some(Duration::max_value()), + &|| false, + |_| {}, + |_| {}, + ); + + Ok(docker) + } + + pub fn new(socket_location: Option) -> Result { + Self::new_with_options(true, socket_location) + } + + fn get_all_envs<'a>(&'a self, envs: &'a [(&'a str, &'a str)]) -> Vec<(&'a str, &'a str)> { + let mut all_envs: Vec<(&str, &str)> = self.common_envs.iter().map(|(k, v)| (k.as_str(), v.as_str())).collect(); + all_envs.append(&mut envs.to_vec()); + + all_envs + } + + pub fn login(&self, registry: &Url) -> Result<(), DockerError> { + info!("Docker login {} as user {}", registry, registry.username()); + let password = urlencoding::decode(®istry.password().unwrap_or_default()) + .unwrap_or_default() + .to_string(); + let args = vec![ + "login", + registry.host_str().unwrap_or_default(), + "-u", + registry.username(), + "-p", + &password, + ]; + + docker_exec( + 
&args, + &self.get_all_envs(&vec![]), + None, + &|| false, + |line| info!("{}", line), + |line| warn!("{}", line), + )?; + + Ok(()) + } + + pub fn does_image_exist_locally(&self, image: &ContainerImage) -> Result { + info!("Docker check locally image exist {:?}", image); + + let ret = docker_exec( + &vec!["image", "inspect", &image.image_name()], + &self.get_all_envs(&vec![]), + None, + &|| false, + |line| info!("{}", line), + |line| warn!("{}", line), + ); + + Ok(matches!(ret, Ok(_))) + } + + // Warning: this command is slow > 10 sec + pub fn does_image_exist_remotely(&self, image: &ContainerImage) -> Result { + info!("Docker check remotely image exist {:?}", image); + + let ret = docker_exec( + &vec!["manifest", "inspect", &image.image_name()], + &self.get_all_envs(&vec![]), + None, + &|| false, + |line| info!("{}", line), + |line| warn!("{}", line), + ); + + match ret { + Ok(_) => Ok(true), + Err(DockerError::ExitStatusError(_)) => Ok(false), + Err(err) => Err(err), + } + } + + pub fn pull( + &self, + image: &ContainerImage, + stdout_output: Stdout, + stderr_output: Stderr, + timeout: Duration, + should_abort: &dyn Fn() -> bool, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker pull {:?}, timeout: {:?}", image, timeout); + + docker_exec( + &vec!["pull", &image.image_name()], + &self.get_all_envs(&vec![]), + Some(timeout), + should_abort, + stdout_output, + stderr_output, + ) + } + + pub fn build( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: &ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: Stdout, + stderr_output: Stderr, + timeout: Duration, + should_abort: &dyn Fn() -> bool, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + // if there is no tags, nothing to build + if image_to_build.tags.is_empty() { + return Ok(()); + } + + // if it is already aborted, nothing to do + 
if (should_abort)() { + return Err(DockerError::Aborted("build".to_string())); + } + + // Do some checks + if !dockerfile.is_file() { + return Err(DockerError::InvalidConfig(format!( + "provided dockerfile `{:?}` is not a valid file", + dockerfile + ))); + } + + if !context.is_dir() { + return Err(DockerError::InvalidConfig(format!( + "provided docker build context `{:?}` is not a valid directory", + context + ))); + } + + if self.use_buildkit { + self.build_with_buildkit( + dockerfile, + context, + image_to_build, + build_args, + cache, + push_after_build, + stdout_output, + stderr_output, + timeout, + should_abort, + ) + } else { + self.build_with_docker( + dockerfile, + context, + image_to_build, + build_args, + cache, + push_after_build, + stdout_output, + stderr_output, + timeout, + should_abort, + ) + } + } + + fn build_with_docker( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: &ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: Stdout, + stderr_output: Stderr, + timeout: Duration, + should_abort: &dyn Fn() -> bool, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker build {:?}", image_to_build.image_name()); + + // Best effort to pull the cache, if it does not exist that's ok too + let _ = self.pull(cache, |_| {}, |_| {}, timeout, should_abort); + + let mut args_string: Vec = vec![ + "build".to_string(), + "--network".to_string(), + "host".to_string(), + "-f".to_string(), + dockerfile.to_str().unwrap_or_default().to_string(), + ]; + + for image_name in image_to_build.image_names() { + args_string.push("--tag".to_string()); + args_string.push(image_name) + } + + for img_cache_name in cache.image_names() { + args_string.push("--tag".to_string()); + args_string.push(img_cache_name) + } + + for (k, v) in build_args { + args_string.push("--build-arg".to_string()); + args_string.push(format!("{}={}", k, v)); + } + + 
args_string.push(context.to_str().unwrap_or_default().to_string()); + + let _ = docker_exec( + &args_string.iter().map(|x| x.as_str()).collect::>(), + &self.get_all_envs(&vec![]), + Some(timeout), + should_abort, + stdout_output, + stderr_output, + )?; + + if push_after_build { + let _ = self.push(image_to_build, |_| {}, |_| {}, timeout, should_abort)?; + } + + Ok(()) + } + + fn build_with_buildkit( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: &ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: Stdout, + stderr_output: Stderr, + timeout: Duration, + should_abort: &dyn Fn() -> bool, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker buildkit build {:?}", image_to_build.image_name()); + + let mut args_string: Vec = vec![ + "buildx".to_string(), + "build".to_string(), + "--progress=plain".to_string(), + "--network=host".to_string(), + if push_after_build { + "--output=type=registry".to_string() // tell buildkit to push image to registry + } else { + "--output=type=docker".to_string() // tell buildkit to load the image into docker after build + }, + "--cache-from".to_string(), + format!("type=registry,ref={}", cache.image_name()), + // Disabled for now, because private ECR does not support it ... 
+ // https://github.com/aws/containers-roadmap/issues/876 + // "--cache-to".to_string(), + // format!("type=registry,ref={}", cache.image_name()), + "-f".to_string(), + dockerfile.to_str().unwrap_or_default().to_string(), + ]; + + for image_name in image_to_build.image_names() { + args_string.push("--tag".to_string()); + args_string.push(image_name.to_string()) + } + + for (k, v) in build_args { + args_string.push("--build-arg".to_string()); + args_string.push(format!("{}={}", k, v)); + } + + args_string.push(context.to_str().unwrap_or_default().to_string()); + + docker_exec( + &args_string.iter().map(|x| x.as_str()).collect::>(), + &self.get_all_envs(&vec![]), + Some(timeout), + should_abort, + stdout_output, + stderr_output, + ) + } + + pub fn push( + &self, + image: &ContainerImage, + stdout_output: Stdout, + stderr_output: Stderr, + timeout: Duration, + should_abort: &dyn Fn() -> bool, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker push {:?}, timeout: {:?}", image, timeout); + let image_names = image.image_names(); + let mut args = vec!["push"]; + args.extend(image_names.iter().map(|x| x.as_str())); + + docker_exec( + &args, + &self.get_all_envs(&vec![]), + Some(timeout), + should_abort, + stdout_output, + stderr_output, + ) + } +} + +fn docker_exec( + args: &[&str], + envs: &[(&str, &str)], + timeout: Option, + should_abort: &dyn Fn() -> bool, + stdout_output: F, + stderr_output: X, +) -> Result<(), DockerError> +where + F: FnMut(String), + X: FnMut(String), +{ + let timeout = timeout.unwrap_or_else(|| Duration::max_value()); + let mut cmd = QoveryCommand::new("docker", args, envs); + let ret = cmd.exec_with_abort(timeout, stdout_output, stderr_output, should_abort); + + match ret { + Ok(_) => Ok(()), + Err(CommandError::TimeoutError(msg)) => Err(DockerError::Timeout(msg)), + Err(CommandError::Killed(msg)) => Err(DockerError::Aborted(msg)), + Err(CommandError::ExitStatusError(err)) => 
Err(DockerError::ExitStatusError(err)), + Err(CommandError::ExecutionError(err)) => Err(DockerError::ExecutionError(err)), + } +} + +pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> EngineError { + EngineError::new_docker_error(event_details.clone(), error) +} + +// start a local registry to run this test +// docker run --rm -ti -p 5000:5000 --name registry registry:2 +#[cfg(feature = "test-with-docker")] +#[cfg(test)] +mod tests { + use crate::cmd::docker::{ContainerImage, Docker, DockerError}; + use chrono::Duration; + use std::path::Path; + use url::Url; + + fn private_registry_url() -> Url { + Url::parse("http://localhost:5000").unwrap() + } + + #[test] + fn test_pull() { + let docker = Docker::new(None).unwrap(); + + // Invalid image should fails + let image = ContainerImage { + registry: Url::parse("https://docker.io").unwrap(), + name: "alpine".to_string(), + tags: vec!["666".to_string()], + }; + let ret = docker.pull( + &image, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + assert!(matches!(ret, Err(_))); + + // Valid image should be ok + let image = ContainerImage { + registry: Url::parse("https://docker.io").unwrap(), + name: "alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + + let ret = docker.pull( + &image, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + assert!(matches!(ret, Ok(_))); + + // Should timeout + let ret = docker.pull( + &image, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::seconds(1), + &|| false, + ); + assert!(matches!(ret, Err(DockerError::Timeout(_)))); + } + + #[test] + fn test_docker_build() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(false, None).unwrap(); + let image_to_build = ContainerImage { + registry: private_registry_url(), + name: 
"erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + let ret = docker.build_with_docker( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &vec![], + &image_cache, + false, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + + assert!(matches!(ret, Ok(_))); + + // It should fails with buildkit dockerfile + let ret = docker.build_with_docker( + Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &vec![], + &image_cache, + false, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + + assert!(matches!(ret, Err(_))); + } + + #[test] + fn test_buildkit_build() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(true, None).unwrap(); + let image_to_build = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + // It should work + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &vec![], + &image_cache, + false, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + + assert!(matches!(ret, Ok(_))); + + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), + Path::new("tests/docker/multi_stage_simple/"), + 
&image_to_build, + &vec![], + &image_cache, + false, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + + assert!(matches!(ret, Ok(_))); + } + + #[test] + fn test_push() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(true, None).unwrap(); + let image_to_build = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + // It should work + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &vec![], + &image_cache, + false, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + assert!(matches!(ret, Ok(_))); + + let ret = docker.does_image_exist_locally(&image_to_build); + assert!(matches!(ret, Ok(true))); + + let ret = docker.does_image_exist_remotely(&image_to_build); + assert!(matches!(ret, Ok(false))); + + let ret = docker.push( + &image_to_build, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + assert!(matches!(ret, Ok(_))); + + let ret = docker.pull( + &image_to_build, + |msg| println!("{}", msg), + |msg| eprintln!("{}", msg), + Duration::max_value(), + &|| false, + ); + assert!(matches!(ret, Ok(_))); + } +} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index f486a56a..153aab46 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -1,4 +1,5 @@ pub mod command; +pub mod docker; pub mod helm; pub mod kubectl; pub mod structs; diff --git a/src/container_registry/docker.rs b/src/container_registry/docker.rs deleted file mode 100644 index 4eaa688d..00000000 --- 
a/src/container_registry/docker.rs +++ /dev/null @@ -1,462 +0,0 @@ -use crate::build_platform::Image; -use crate::cmd; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::Kind; -use crate::errors::CommandError; -use crate::events::{EngineEvent, EventDetails, EventMessage}; -use crate::logger::{LogLevel, Logger}; -use chrono::Duration; -use retry::delay::Fibonacci; -use retry::Error::Operation; -use retry::OperationResult; - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct DockerImageManifest { - pub schema_version: i64, - pub media_type: String, - pub config: Config, - pub layers: Vec, -} - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Config { - pub media_type: String, - pub size: i64, - pub digest: String, -} - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Layer { - pub media_type: String, - pub size: i64, - pub digest: String, -} - -pub fn docker_manifest_inspect( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - image_name: String, - image_tag: String, - registry_url: String, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result { - let image_with_tag = format!("{}:{}", image_name, image_tag); - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - // Note: `docker manifest inspect` is still experimental for the time being: - // https://docs.docker.com/engine/reference/commandline/manifest_inspect/ - let mut envs = docker_envs.clone(); - envs.push(("DOCKER_CLI_EXPERIMENTAL", "enabled")); - - let binary = "docker"; - let image_full_url = format!("{}/{}", 
registry_url.as_str(), &image_with_tag); - let args = vec!["manifest", "inspect", image_full_url.as_str()]; - let mut raw_output: Vec = vec![]; - - let mut cmd = QoveryCommand::new("docker", &args, &envs); - return match cmd.exec_with_timeout(Duration::minutes(1), |line| raw_output.push(line), |_| {}) { - Ok(_) => { - let joined = raw_output.join(""); - match serde_json::from_str(&joined) { - Ok(extracted_manifest) => Ok(extracted_manifest), - Err(e) => { - let error = CommandError::new( - e.to_string(), - Some(format!( - "Error while trying to deserialize manifest image manifest for image {} in {} ({}).", - image_with_tag, registry_provider, registry_url, - )), - ); - - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::from(error.clone())), - ); - - Err(error) - } - } - } - Err(e) => { - let error = CommandError::new( - format!( - "Command `{}`: {:?}", - cmd::command::command_to_string(binary, &args, &envs), - e - ), - Some(format!( - "Error while trying to inspect image manifest for image {} in {} ({}).", - image_with_tag, registry_provider, registry_url, - )), - ); - - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::from(error.clone())), - ); - - Err(error) - } - }; -} - -pub fn docker_login( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - registry_login: String, - registry_pass: String, - registry_url: String, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result<(), CommandError> { - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let binary = "docker"; - let args = vec![ - "login", - registry_url.as_str(), - "-u", - registry_login.as_str(), - "-p", - registry_pass.as_str(), - ]; - - let mut cmd = QoveryCommand::new(binary, &args, &docker_envs); - match cmd.exec() { - Ok(_) => 
Ok(()), - Err(e) => { - let err = CommandError::new( - format!( - "Command `{}`: {:?}", - cmd::command::command_to_string(binary, &args, &docker_envs), - e, - ), - Some(format!( - "Error while trying to login to registry {} {}.", - registry_provider, registry_url, - )), - ); - - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::from(err.clone())), - ); - - Err(err) - } - } -} - -pub fn docker_tag_and_push_image( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - image: &Image, - dest: String, - dest_latest_tag: String, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result<(), CommandError> { - let image_with_tag = image.name_with_tag(); - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let binary = "docker"; - let args = vec!["tag", &image_with_tag, dest.as_str()]; - let mut cmd = QoveryCommand::new(binary, &args, &docker_envs); - match retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!("Failed to tag image `{}`, retrying...", image_with_tag), - Some(format!( - "Command `{}`: {:?}", - cmd::command::command_to_string(binary, &args, &docker_envs), - e - )), - ), - ), - ); - - OperationResult::Retry(e) - } - }) { - Err(Operation { error, .. 
}) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::from(CommandError::new_from_legacy_command_error( - error, - Some(format!("Error while trying to tag docker image `{}`", image_with_tag)), - )), - ), - ); - } - Err(retry::Error::Internal(msg)) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::from(CommandError::new( - msg, - Some(format!("Error while trying to tag docker image `{}`", image_with_tag)), - )), - ), - ); - } - Ok(_) => {} - } - - let mut cmd = QoveryCommand::new("docker", &vec!["push", dest.as_str()], &docker_envs); - let _ = match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| { - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), - ) - }, - |line| { - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), - ) - }, - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Failed to push image `{}` on `{}`, retrying ...", - image_with_tag, registry_provider - ), - Some(format!("{:?}", e)), - ), - ), - ); - - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. 
}) => Err(CommandError::new_from_legacy_command_error( - error, - Some(format!("Failed to push docker image `{}`", image_with_tag)), - )), - Err(retry::Error::Internal(msg)) => Err(CommandError::new( - msg, - Some(format!("Failed to push docker image `{}`", image_with_tag)), - )), - _ => { - logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Image {} has successfully been pushed on `{}`", - image_with_tag, registry_provider - )), - ), - ); - - Ok(()) - } - }; - - let image_with_latest_tag = image.name_with_latest_tag(); - let mut cmd = QoveryCommand::new( - "docker", - &vec!["tag", &image_with_latest_tag, dest_latest_tag.as_str()], - &docker_envs, - ); - match retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!("Failed to tag image `{}`, retrying ...", image_with_latest_tag), - Some(format!("{:?}", e)), - ), - ), - ); - OperationResult::Retry(e) - } - }) { - Err(Operation { error, .. 
}) => { - return Err(CommandError::new_from_legacy_command_error( - error, - Some(format!("Failed to tag docker image `{}`", image_with_tag)), - )) - } - Err(retry::Error::Internal(msg)) => { - return Err(CommandError::new( - msg, - Some(format!("Failed to tag docker image `{}`", image_with_tag)), - )) - } - _ => {} - } - - let mut cmd = QoveryCommand::new("docker", &vec!["push", dest_latest_tag.as_str()], &docker_envs); - match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| { - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), - ) - }, - |line| { - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), - ) - }, - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Failed to push image {} on {}, retrying...", - image_with_tag, registry_provider - ), - Some(format!("{:?}", e)), - ), - ), - ); - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. 
}) => Err(CommandError::new(error.to_string(), None)), - Err(e) => Err(CommandError::new( - format!("{:?}", e), - Some(format!( - "Unknown error while trying to push image {} to {}.", - image_with_tag, registry_provider, - )), - )), - _ => { - logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("image {} has successfully been pushed", image_with_tag)), - ), - ); - Ok(()) - } - } -} - -pub fn docker_pull_image( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - dest: String, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result<(), CommandError> { - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let mut cmd = QoveryCommand::new("docker", &vec!["pull", dest.as_str()], &docker_envs); - match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| { - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new(line, None)), - ) - }, - |line| { - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::new(line, None)), - ) - }, - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "failed to pull image from {} registry {}, retrying...", - registry_provider, - dest.as_str(), - ), - Some(format!("{:?}", e)), - ), - ), - ); - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. 
}) => Err(CommandError::new(error.to_string(), None)), - Err(e) => Err(CommandError::new( - format!("{:?}", e), - Some(format!( - "Unknown error while trying to pull image {} from {} registry.", - dest.as_str(), - registry_provider, - )), - )), - _ => { - logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Image {} has successfully been pulled from {} registry", - dest.as_str(), - registry_provider, - )), - ), - ); - Ok(()) - } - } -} diff --git a/src/container_registry/docker_hub.rs b/src/container_registry/docker_hub.rs deleted file mode 100644 index 63fb48b7..00000000 --- a/src/container_registry/docker_hub.rs +++ /dev/null @@ -1,320 +0,0 @@ -extern crate reqwest; - -use reqwest::StatusCode; -use std::borrow::Borrow; - -use crate::build_platform::Image; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; - -pub struct DockerHub { - context: Context, - id: String, - name: String, - login: String, - password: String, - listeners: Listeners, - logger: Box, -} - -impl DockerHub { - pub fn new(context: Context, id: &str, name: &str, login: &str, password: &str, logger: Box) -> Self { - DockerHub { - context, - id: id.to_string(), - name: name.to_string(), - login: login.to_string(), - password: password.to_string(), - listeners: vec![], - logger, - } - } - - pub fn exec_docker_login(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - - let envs = match self.context.docker_tcp_socket() { - Some(tcp_socket) => 
vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - }; - - let mut cmd = QoveryCommand::new( - "docker", - &vec!["login", "-u", self.login.as_str(), "-p", self.password.as_str()], - &envs, - ); - - match cmd.exec() { - Ok(_) => Ok(()), - Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )), - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - let event_details = self.get_event_details(); - match docker_pull_image(self.kind(), vec![], dest.clone(), event_details.clone(), self.logger()) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(EngineError::new_docker_pull_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )), - } - } -} - -impl ToTransmitter for DockerHub { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } -} - -impl ContainerRegistry for DockerHub { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::DockerHub - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn is_valid(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_create(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn does_image_exists(&self, image: &Image) -> bool { - let event_details = self.get_event_details(); - use reqwest::blocking::Client; - let client = Client::new(); - let path = format!( - "https://index.docker.io/v1/repositories/{}/{}/tags", - &self.login, image.name - ); - let res = client - .get(path.as_str()) - .basic_auth(&self.login, Option::from(&self.password)) - .send(); - - // TODO (mzo) no 
check of existing tags as in others impl ? - match res { - Ok(out) => matches!(out.status(), StatusCode::OK), - Err(e) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_container_registry_repository_doesnt_exist( - event_details.clone(), - image.name.to_string(), - Some(CommandError::new( - e.to_string(), - Some("Error while trying to retrieve if DockerHub repository exist.".to_string()), - )), - ), - None, - ), - ); - false - } - } - } - - fn pull(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !self.does_image_exists(image) { - let info_message = format!( - "image {:?} does not exist in DockerHub {} repository", - image, - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from DockerHub {} repository", image, self.name()); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login()?; - - let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str()); - - // pull image - self.pull_image(dest, image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let event_details = self.get_event_details(); - - let _ = self.exec_docker_login()?; - - 
let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str()); - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {:?} found on DockerHub {} repository, container build is not required", - image, - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image {:?} does not exist on DockerHub {} repository, starting image upload", - image, - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}/{}:latest", self.login.as_str(), image.name); - match docker_tag_and_push_image( - self.kind(), - vec![], - &image, - dest.clone(), - dest_latest_tag, - event_details.clone(), - self.logger(), - ) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PushResult { image }) - } - Err(e) => Err(EngineError::new_docker_push_image_error( - event_details.clone(), - image.name.to_string(), - dest.to_string(), - e, - )), - } - } - - fn push_error(&self, _image: &Image) -> Result { - unimplemented!() - } - - fn logger(&self) -> &dyn 
Logger { - self.logger.borrow() - } -} - -impl Listen for DockerHub { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 6b71b36b..00d8060f 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -6,21 +6,17 @@ use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult}; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, EngineError, Kind}; use crate::errors::CommandError; -use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; +use crate::events::{EngineEvent, EventDetails, ToTransmitter, Transmitter}; use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::models::{Context, Listen, Listener, Listeners}; use crate::utilities; -use retry::delay::Fixed; -use retry::Error::Operation; -use retry::OperationResult; +use url::Url; const CR_API_PATH: &str = "https://api.digitalocean.com/v2/registry"; const CR_CLUSTER_API_PATH: &str = "https://api.digitalocean.com/v2/kubernetes/registry"; +const CR_REGISTRY_DOMAIN: &str = "registry.digitalocean.com"; // TODO : use --output json // see https://www.digitalocean.com/community/tutorials/how-to-use-doctl-the-official-digitalocean-command-line-client @@ -46,32 +42,16 @@ impl DOCR { } } - fn get_registry_name(&self, image: &Image) -> Result { + fn create_registry(&self, registry_name: &str) -> Result<(), EngineError> { let event_details = self.get_event_details(); - let registry_name = match image.registry_name.as_ref() { - 
// DOCR does not support upper cases - Some(registry_name) => registry_name.to_lowercase(), - None => get_current_registry_name(self.api_key.as_str(), event_details, self.logger())?, - }; - - Ok(registry_name) - } - - fn create_repository(&self, image: &Image) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - - let registry_name = match image.registry_name.as_ref() { - // DOCR does not support upper cases - Some(registry_name) => registry_name.to_lowercase(), - None => self.name.clone(), - }; - + // DOCR does not support upper cases + let registry_name = registry_name.to_lowercase(); let headers = utilities::get_header_with_bearer(&self.api_key); // subscription_tier_slug: https://www.digitalocean.com/products/container-registry/ // starter and basic tiers are too limited on repository creation let repo = DoApiCreateRepository { - name: registry_name.clone(), + name: registry_name.to_string(), subscription_tier_slug: "professional".to_string(), }; @@ -133,77 +113,7 @@ impl DOCR { } } - fn push_image(&self, registry_name: String, dest: String, image: &Image) -> Result { - let event_details = self.get_event_details(); - - let dest_latest_tag = format!( - "registry.digitalocean.com/{}/{}:latest", - registry_name.as_str(), - image.name - ); - - if let Err(e) = docker_tag_and_push_image( - self.kind(), - vec![], - image, - dest.clone(), - dest_latest_tag.clone(), - event_details.clone(), - self.logger(), - ) { - return Err(EngineError::new_docker_push_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )); - } - - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - - let result = retry::retry(Fixed::from_millis(10000).take(12), || { - match self.does_image_exists(&image) { - true => OperationResult::Ok(&image), - false => { - 
self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new_from_safe( - "Image is not yet available on DOCR, retrying in a few seconds...".to_string(), - ), - ), - ); - OperationResult::Retry(()) - } - } - }); - - let image_not_reachable = Err(EngineError::new_container_registry_image_unreachable_after_push( - event_details.clone(), - image.name.to_string(), - )); - match result { - Ok(_) => Ok(PushResult { image }), - Err(Operation { .. }) => image_not_reachable, - Err(retry::Error::Internal(_)) => image_not_reachable, - } - } - - pub fn get_image(&self, _image: &Image) -> Option<()> { - todo!() - } - - pub fn delete_image(&self, _image: &Image) -> Result<(), EngineError> { - // TODO(benjaminch): To be implemented later on, but note it must not slow down CI workflow - Ok(()) - } - - pub fn delete_repository(&self) -> Result<(), EngineError> { + pub fn delete_registry(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(); let headers = utilities::get_header_with_bearer(&self.api_key); @@ -255,27 +165,6 @@ impl DOCR { )), } } - - fn pull_image(&self, registry_name: String, dest: String, image: &Image) -> Result { - let event_details = self.get_event_details(); - - match docker_pull_image(self.kind(), vec![], dest.clone(), event_details.clone(), self.logger()) { - Ok(_) => { - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(EngineError::new_docker_pull_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )), - } - } } impl ToTransmitter for DOCR { @@ -305,38 +194,39 @@ impl ContainerRegistry for DOCR { Ok(()) } - fn on_create(&self) -> Result<(), EngineError> { + fn login(&self) -> Result { + let _ = self.exec_docr_login()?; + let 
registry_name = self.name.clone(); + Ok(ContainerRegistryInfo { + endpoint: Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(), + registry_name: self.name.to_string(), + registry_docker_json_config: None, + get_image_name: Box::new(move |img_name| format!("{}/{}", registry_name, img_name)), + }) + } + + fn create_registry(&self) -> Result<(), EngineError> { + // Digital Ocean only allow one registry per account... + if let Err(_) = get_current_registry_name(self.api_key.as_str(), self.get_event_details(), self.logger()) { + let _ = self.create_registry(self.name())?; + } + Ok(()) } - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { + fn create_repository(&self, _repository_name: &str) -> Result<(), EngineError> { + // Nothing to do, DO only allow one registry and create repository on the flight when image are pushed Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { let event_details = self.get_event_details(); - let registry_name = match self.get_registry_name(image) { - Ok(registry_name) => registry_name, - Err(err) => { - self.logger.log(LogLevel::Error, EngineEvent::Error(err, None)); - return false; - } - }; - let headers = utilities::get_header_with_bearer(self.api_key.as_str()); let url = format!( "https://api.digitalocean.com/v2/registry/{}/repositories/{}/tags", - registry_name, - image.name.as_str() + image.registry_name, + image.name() ); let res = reqwest::blocking::Client::new() @@ -353,10 +243,10 @@ impl ContainerRegistry for DOCR { EngineEvent::Error( EngineError::new_container_registry_image_doesnt_exist( event_details.clone(), - image.name.to_string(), + image.name().to_string(), Some(CommandError::new_from_safe_message(format!( "While tyring to get all tags for image: `{}`, maybe this image not exist !", - image.name.to_string() + image.name().to_string() ))), ), None, @@ -372,10 
+262,10 @@ impl ContainerRegistry for DOCR { EngineEvent::Error( EngineError::new_container_registry_image_doesnt_exist( event_details.clone(), - image.name.to_string(), + image.name().to_string(), Some(CommandError::new_from_safe_message(format!( "While trying to communicate with DigitalOcean API to retrieve all tags for image `{}`.", - image.name.to_string() + image.name().to_string() ))), ), None, @@ -405,7 +295,7 @@ impl ContainerRegistry for DOCR { EngineEvent::Error( EngineError::new_container_registry_image_doesnt_exist( event_details.clone(), - image.name.to_string(), + image.name().to_string(), Some(CommandError::new( out.to_string(), Some(format!( @@ -428,10 +318,10 @@ impl ContainerRegistry for DOCR { EngineEvent::Error( EngineError::new_container_registry_image_doesnt_exist( event_details.clone(), - image.name.to_string(), + image.name().to_string(), Some(CommandError::new_from_safe_message(format!( "While retrieving tags for image `{}` Unable to get output from DigitalOcean API.", - image.name.to_string() + image.name().to_string() ))), ), None, @@ -443,164 +333,6 @@ impl ContainerRegistry for DOCR { } } - fn pull(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !self.does_image_exists(image) { - let info_message = format!("image {:?} does not exist in DOCR {} repository", image, self.name()); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from DOCR {} repository", image, self.name()); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - 
event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docr_login()?; - - let registry_name = self.get_registry_name(image)?; - - let dest = format!( - "registry.digitalocean.com/{}/{}", - registry_name.as_str(), - image.name_with_tag() - ); - - // pull image - self.pull_image(registry_name, dest, image) - } - - // https://www.digitalocean.com/docs/images/container-registry/how-to/use-registry-docker-kubernetes/ - fn push(&self, image: &Image, force_push: bool) -> Result { - let event_details = self.get_event_details(); - let registry_name = self.get_registry_name(image)?; - - match self.create_repository(image) { - Ok(_) => self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("DOCR {} has been created", registry_name.as_str())), - ), - ), - Err(e) => self.logger.log( - LogLevel::Error, - EngineEvent::Error( - e.clone(), - Some(EventMessage::new_from_safe(format!( - "DOCR {} already exists", - registry_name.as_str() - ))), - ), - ), - }; - - let _ = self.exec_docr_login()?; - - let dest = format!( - "registry.digitalocean.com/{}/{}", - registry_name.as_str(), - image.name_with_tag() - ); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {:?} found on DOCR {} repository, container build is not required", - image, - registry_name.as_str() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - 
ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image {:?} does not exist on DOCR {} repository, starting image upload", - image, registry_name - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - self.push_image(registry_name, dest, image) - } - - fn push_error(&self, image: &Image) -> Result { - Ok(PushResult { image: image.clone() }) - } - fn logger(&self) -> &dyn Logger { self.logger.borrow() } diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index a24ef38a..28b4fd4c 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -10,20 +10,18 @@ use rusoto_ecr::{ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient}; use crate::build_platform::Image; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; +use crate::cmd::docker::{to_engine_error, Docker}; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Context, Listen, 
Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::models::{Context, Listen, Listener, Listeners}; use crate::runtime::block_on; use retry::delay::Fixed; use retry::Error::Operation; use retry::OperationResult; use serde_json::json; +use url::Url; pub struct ECR { context: Context, @@ -75,9 +73,9 @@ impl ECR { EcrClient::new_with_client(self.client(), self.region.clone()) } - fn get_repository(&self, image: &Image) -> Option { + fn get_repository(&self, repository_name: &str) -> Option { let mut drr = DescribeRepositoriesRequest::default(); - drr.repository_names = Some(vec![image.name.to_string()]); + drr.repository_names = Some(vec![repository_name.to_string()]); let r = block_on(self.ecr_client().describe_repositories(drr)); @@ -93,7 +91,7 @@ impl ECR { fn get_image(&self, image: &Image) -> Option { let mut dir = DescribeImagesRequest::default(); - dir.repository_name = image.name.to_string(); + dir.repository_name = image.name().to_string(); let mut image_identifier = ImageIdentifier::default(); image_identifier.image_tag = Some(image.tag.to_string()); @@ -111,71 +109,8 @@ impl ECR { } } - fn docker_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - } - } - - fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { - // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-push-ecr-image.html - // docker tag e9ae3c220b23 aws_account_id.dkr.ecr.region.amazonaws.com/my-web-app + fn create_repository(&self, repository_name: &str) -> Result { let event_details = self.get_event_details(); - - match docker_tag_and_push_image( - self.kind(), - self.docker_envs(), - &image, - dest.clone(), - dest_latest_tag, - event_details.clone(), - self.logger(), - ) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PushResult { image }) - } - 
Err(e) => Err(EngineError::new_docker_push_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )), - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html - // docker pull aws_account_id.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest - let event_details = self.get_event_details(); - - match docker_pull_image( - self.kind(), - self.docker_envs(), - dest.clone(), - event_details.clone(), - self.logger(), - ) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(EngineError::new_docker_pull_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )), - } - } - - fn create_repository(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - let repository_name = image.name.as_str(); - self.logger().log( LogLevel::Info, EngineEvent::Info( @@ -314,7 +249,7 @@ impl ECR { }); let plp = PutLifecyclePolicyRequest { - repository_name: image.name.clone(), + repository_name: repository_name.to_string(), lifecycle_policy_text: lifecycle_policy_text.to_string(), ..Default::default() }; @@ -327,27 +262,27 @@ impl ECR { CommandError::new_from_safe_message(err.to_string()), ), ), - _ => Ok(self.get_repository(image).expect("cannot get repository")), + _ => Ok(self.get_repository(repository_name).expect("cannot get repository")), } } - fn get_or_create_repository(&self, image: &Image) -> Result { + fn get_or_create_repository(&self, repository_name: &str) -> Result { let event_details = self.get_event_details(); // check if the repository already exists - let repository = self.get_repository(image); + let repository = self.get_repository(repository_name); if repository.is_some() { self.logger.log( LogLevel::Info, EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe(format!("ECR repository {} 
already exists", image.name.as_str())), + EventMessage::new_from_safe(format!("ECR repository {} already exists", repository_name)), ), ); return Ok(repository.unwrap()); } - self.create_repository(image) + self.create_repository(repository_name) } fn get_credentials(&self) -> Result { @@ -391,32 +326,6 @@ impl ECR { Ok(ECRCredentials::new(access_token, password, endpoint_url)) } - - fn exec_docker_login(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - let credentials = self.get_credentials()?; - - let mut cmd = QoveryCommand::new( - "docker", - &vec![ - "login", - "-u", - credentials.access_token.as_str(), - "-p", - credentials.password.as_str(), - credentials.endpoint_url.as_str(), - ], - &self.docker_envs(), - ); - - if let Err(_) = cmd.exec() { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details.clone(), - )); - }; - - Ok(()) - } } impl ToTransmitter for ECR { @@ -454,196 +363,41 @@ impl ContainerRegistry for ECR { } } - fn on_create(&self) -> Result<(), EngineError> { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe("ECR.on_create() called".to_string()), - ), - ); + fn login(&self) -> Result { + let event_details = self.get_event_details(); + let credentials = self.get_credentials()?; + let docker = Docker::new(self.context.docker_tcp_socket().clone()) + .map_err(|err| to_engine_error(&event_details, err))?; + let mut registry_url = Url::parse(credentials.endpoint_url.as_str()).unwrap(); + let _ = registry_url.set_username(&credentials.access_token); + let _ = registry_url.set_password(Some(&credentials.password)); + + let _ = docker + .login(®istry_url) + .map_err(|err| to_engine_error(&event_details, err))?; + + Ok(ContainerRegistryInfo { + endpoint: registry_url, + registry_name: self.name.to_string(), + registry_docker_json_config: None, + get_image_name: Box::new(|img_name| img_name.to_string()), + }) + } + + fn 
create_registry(&self) -> Result<(), EngineError> { + // Nothing to do, ECR require to create only repository Ok(()) } - fn on_create_error(&self) -> Result<(), EngineError> { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe("ECR.on_create_error() called".to_string()), - ), - ); - - unimplemented!() - } - - fn on_delete(&self) -> Result<(), EngineError> { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe("ECR.on_delete() called".to_string()), - ), - ); - unimplemented!() - } - - fn on_delete_error(&self) -> Result<(), EngineError> { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe("ECR.on_delete_error() called".to_string()), - ), - ); - unimplemented!() + fn create_repository(&self, name: &str) -> Result<(), EngineError> { + let _ = self.get_or_create_repository(name)?; + Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { self.get_image(image).is_some() } - fn pull(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !self.does_image_exists(image) { - let info_message = format!( - "image `{}` does not exist in ECR {} repository", - image.name_with_tag(), - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!( - "pull image `{:?}` from ECR {} repository", - image.name_with_tag(), - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - 
event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login()?; - - let repository = self.get_or_create_repository(image)?; - - let dest = format!("{}:{}", repository.repository_uri.unwrap(), image.tag.as_str()); - - // pull image - self.pull_image(dest, image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let _ = self.exec_docker_login()?; - - let repository = if force_push { - self.create_repository(image) - } else { - self.get_or_create_repository(image) - }?; - - let repository_uri = repository.repository_uri.expect("Error getting repository URI"); - let dest = format!("{}:{}", repository_uri, image.tag.as_str()); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {} found on ECR {} repository, container build is not required", - image.name_with_tag(), - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image `{}` does not exist on ECR {} repository, starting image upload", - image.name_with_tag(), - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - 
EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}:latest", repository_uri); - self.push_image(dest, dest_latest_tag, image) - } - - fn push_error(&self, image: &Image) -> Result { - // TODO change this - Ok(PushResult { image: image.clone() }) - } - fn logger(&self) -> &dyn Logger { self.logger.borrow() } diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index f74c1dff..ddf8e56d 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use url::Url; use crate::build_platform::Image; use crate::errors::EngineError; @@ -6,8 +7,6 @@ use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::logger::Logger; use crate::models::{Context, Listen, QoveryIdentifier}; -pub mod docker; -pub mod docker_hub; pub mod docr; pub mod ecr; pub mod scaleway_container_registry; @@ -21,14 +20,26 @@ pub trait ContainerRegistry: Listen + ToTransmitter { format!("{} ({})", self.name(), self.id()) } fn is_valid(&self) -> Result<(), EngineError>; - fn on_create(&self) -> Result<(), EngineError>; - fn on_create_error(&self) -> Result<(), EngineError>; - fn on_delete(&self) -> Result<(), EngineError>; - fn on_delete_error(&self) -> Result<(), EngineError>; + + // Login into the registry and setup everything for it + // mainly getting creds and calling docker login behind the hood + // It is poart of the ContainerRegistry only because DigitalOcean require to call doctl + // and that we can't get credentials directly + fn login(&self) -> Result; + + // Some provider require specific action in order to allow container registry + // For now it is only digital ocean, that require 2 steps to have registries + 
fn create_registry(&self) -> Result<(), EngineError>; + + // Call to create a specific repository in the registry + // i.e: docker.io/erebe or docker.io/qovery + // All providers requires action for that + // The convention for us is that we create one per application + fn create_repository(&self, repository_name: &str) -> Result<(), EngineError>; + + // Check on the registry if a specific image already exist fn does_image_exists(&self, image: &Image) -> bool; - fn pull(&self, image: &Image) -> Result; - fn push(&self, image: &Image, force_push: bool) -> Result; - fn push_error(&self, image: &Image) -> Result; + fn logger(&self) -> &dyn Logger; fn get_event_details(&self) -> EventDetails { let context = self.context(); @@ -44,6 +55,17 @@ pub trait ContainerRegistry: Listen + ToTransmitter { } } +pub struct ContainerRegistryInfo { + pub endpoint: Url, // Contains username and password if necessary + pub registry_name: String, + pub registry_docker_json_config: Option, + // give it the name of your image, and it returns the full name with prefix if needed + // i.e: for DigitalOcean => registry_name/image_name + // i.e: fo scaleway => image_name/image_name + // i.e: for AWS => image_name + pub get_image_name: Box String>, +} + pub struct PushResult { pub image: Image, } @@ -56,7 +78,6 @@ pub enum PullResult { #[derive(Serialize, Deserialize, Clone, Copy, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { - DockerHub, Ecr, Docr, ScalewayCr, diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 1f8f18f9..fcdb03ac 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -5,21 +5,15 @@ use std::borrow::Borrow; use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; -use crate::container_registry::docker::{ - docker_login, docker_manifest_inspect, 
docker_pull_image, docker_tag_and_push_image, -}; -use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; +use crate::cmd::docker; +use crate::cmd::docker::Docker; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::models::{Context, Listen, Listener, Listeners}; use crate::runtime::block_on; -use retry::delay::Fibonacci; -use retry::Error::Operation; -use retry::OperationResult; -use rusoto_core::param::ToParam; +use url::Url; pub struct ScalewayCR { context: Context, @@ -29,6 +23,7 @@ pub struct ScalewayCR { login: String, secret_token: String, zone: ScwZone, + docker: Docker, listeners: Listeners, logger: Box, } @@ -43,6 +38,8 @@ impl ScalewayCR { zone: ScwZone, logger: Box, ) -> ScalewayCR { + let docker = Docker::new(context.docker_tcp_socket().clone()).unwrap(); // FIXME: remove unwrap + ScalewayCR { context, id: id.to_string(), @@ -51,6 +48,7 @@ impl ScalewayCR { login: "nologin".to_string(), secret_token: secret_token.to_string(), zone, + docker, listeners: Vec::new(), logger, } @@ -66,16 +64,9 @@ impl ScalewayCR { } } - fn get_docker_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - } - } - pub fn get_registry_namespace( &self, - image: &Image, + namespace_name: &str, ) -> Option { // https://developers.scaleway.com/en/products/registry/api/#get-09e004 let scaleway_registry_namespaces = match block_on(scaleway_api_rs::apis::namespaces_api::list_namespaces( @@ -86,7 +77,7 @@ impl ScalewayCR { None, None, Some(self.default_project_id.as_str()), - image.registry_name.as_deref(), + 
Some(namespace_name), )) { Ok(res) => res.namespaces, Err(e) => { @@ -96,7 +87,7 @@ impl ScalewayCR { self.get_event_details(), EventMessage::new( "Error while interacting with Scaleway API (list_namespaces).".to_string(), - Some(format!("error: {}, image: {}", e, &image.name)), + Some(format!("error: {}, image: {}", e, namespace_name)), ), ), ); @@ -127,7 +118,7 @@ impl ScalewayCR { None, None, None, - Some(image.name.as_str()), + Some(image.name().as_str()), None, Some(self.default_project_id.as_str()), )) { @@ -139,7 +130,7 @@ impl ScalewayCR { self.get_event_details(), EventMessage::new( "Error while interacting with Scaleway API (list_namespaces).".to_string(), - Some(format!("error: {}, image: {}", e, &image.name)), + Some(format!("error: {}, image: {}", e, &image.name())), ), ), ); @@ -168,7 +159,7 @@ impl ScalewayCR { if image_to_delete.is_none() { let err = EngineError::new_container_registry_image_doesnt_exist( event_details.clone(), - image.name.to_string(), + image.name().to_string(), None, ); @@ -188,7 +179,7 @@ impl ScalewayCR { Err(e) => { let err = EngineError::new_container_registry_delete_image_error( event_details.clone(), - image.name.to_string(), + image.name().to_string(), Some(CommandError::new(e.to_string(), None)), ); @@ -199,81 +190,9 @@ impl ScalewayCR { } } - fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { - // https://www.scaleway.com/en/docs/deploy-an-image-from-registry-to-kubernetes-kapsule/ - let event_details = self.get_event_details(); - - if let Err(e) = docker_tag_and_push_image( - self.kind(), - self.get_docker_envs(), - image, - dest.to_string(), - dest_latest_tag.to_string(), - event_details.clone(), - self.logger(), - ) { - return Err(EngineError::new_docker_push_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )); - } - - let result = retry::retry(Fibonacci::from_millis(10000).take(10), || { - match self.does_image_exists(image) { - true => 
OperationResult::Ok(&image), - false => { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new_from_safe( - "Image is not yet available on Scaleway Registry Namespace, retrying in a few seconds...".to_string(), - ), - ), - ); - OperationResult::Retry(()) - } - } - }); - - let image_not_reachable = Err(EngineError::new_container_registry_image_unreachable_after_push( - event_details.clone(), - image.name.to_string(), - )); - - match result { - Ok(_) => Ok(PushResult { image: image.clone() }), - Err(Operation { .. }) => image_not_reachable, - Err(retry::Error::Internal(_)) => image_not_reachable, - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - let event_details = self.get_event_details(); - - if let Err(e) = docker_pull_image( - self.kind(), - self.get_docker_envs(), - dest.to_string(), - event_details.clone(), - self.logger(), - ) { - return Err(EngineError::new_docker_pull_image_error( - event_details, - image.name.to_string(), - dest.to_string(), - e, - )); - } - - Ok(PullResult::Some(image.clone())) - } - pub fn create_registry_namespace( &self, - image: &Image, + namespace_name: &str, ) -> Result { let event_details = self.get_event_details(); @@ -282,7 +201,7 @@ impl ScalewayCR { &self.get_configuration(), self.zone.region().to_string().as_str(), scaleway_api_rs::models::inline_object_29::InlineObject29 { - name: image.name.clone(), + name: namespace_name.to_string(), description: None, project_id: Some(self.default_project_id.clone()), is_public: Some(false), @@ -293,7 +212,7 @@ impl ScalewayCR { Err(e) => { let error = EngineError::new_container_registry_namespace_creation_error( event_details.clone(), - image.name.clone(), + namespace_name.to_string(), self.name_with_id(), CommandError::new(e.to_string(), Some("Can't create SCW repository".to_string())), ); @@ -308,19 +227,15 @@ impl ScalewayCR { pub fn delete_registry_namespace( &self, - image: &Image, + namespace_name: 
&str, ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#delete-c1ac9b let event_details = self.get_event_details(); - let registry_to_delete = self.get_registry_namespace(image); - let repository_name = match image.registry_name.as_ref() { - None => "unknown", - Some(name) => name, - }; + let registry_to_delete = self.get_registry_namespace(namespace_name); if registry_to_delete.is_none() { let error = EngineError::new_container_registry_repository_doesnt_exist( event_details.clone(), - repository_name.to_string(), + namespace_name.to_string(), None, ); @@ -341,7 +256,7 @@ impl ScalewayCR { Err(e) => { let error = EngineError::new_container_registry_delete_repository_error( event_details.clone(), - repository_name.to_string(), + namespace_name.to_string(), Some(CommandError::new(e.to_string(), None)), ); @@ -355,23 +270,25 @@ impl ScalewayCR { pub fn get_or_create_registry_namespace( &self, - image: &Image, + namespace_name: &str, ) -> Result { + info!("Get/Create repository for {}", namespace_name); + // check if the repository already exists let event_details = self.get_event_details(); - let registry_namespace = self.get_registry_namespace(&image); + let registry_namespace = self.get_registry_namespace(namespace_name); if let Some(namespace) = registry_namespace { self.logger.log( LogLevel::Info, EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe(format!("SCW repository {} already exists", image.name.as_str())), + EventMessage::new_from_safe(format!("SCW repository {} already exists", namespace_name)), ), ); return Ok(namespace); } - self.create_registry_namespace(image) + self.create_registry_namespace(namespace_name) } fn get_docker_json_config_raw(&self) -> String { @@ -384,27 +301,6 @@ impl ScalewayCR { .as_bytes(), ) } - - fn exec_docker_login(&self, registry_url: &String) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - if docker_login( - Kind::ScalewayCr, - self.get_docker_envs(), 
- self.login.clone(), - self.secret_token.clone(), - registry_url.clone(), - event_details.clone(), - self.logger(), - ) - .is_err() - { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - }; - - Ok(()) - } } impl ToTransmitter for ScalewayCR { @@ -434,218 +330,49 @@ impl ContainerRegistry for ScalewayCR { Ok(()) } - fn on_create(&self) -> Result<(), EngineError> { + fn login(&self) -> Result { + let event_details = self.get_event_details(); + let mut registry = Url::parse(&format!("https://rg.{}.scw.cloud", self.zone.region())).unwrap(); + let _ = registry.set_username(&self.login); + let _ = registry.set_password(Some(&self.secret_token)); + + if self.docker.login(®istry).is_err() { + return Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, + )); + } + + Ok(ContainerRegistryInfo { + endpoint: registry, + registry_name: self.name.to_string(), + registry_docker_json_config: Some(self.get_docker_json_config_raw()), + get_image_name: Box::new(move |img_name| format!("{}/{}", img_name, img_name)), + }) + } + + fn create_registry(&self) -> Result<(), EngineError> { + // Nothing to do, scaleway managed container registry per repository (aka `namespace` by the scw naming convention) Ok(()) } - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { + fn create_repository(&self, name: &str) -> Result<(), EngineError> { + let _ = self.get_or_create_registry_namespace(name)?; Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { - let event_details = self.get_event_details(); - let registry_url = image - .registry_url - .as_ref() - .unwrap_or(&"undefined".to_string()) - .to_param(); - - if let Err(_) = docker_login( - Kind::ScalewayCr, - self.get_docker_envs(), - self.login.clone(), - self.secret_token.clone(), - registry_url.clone(), - 
event_details.clone(), - self.logger(), - ) { + let info = if let Ok(url) = self.login() { + url + } else { return false; - } + }; - docker_manifest_inspect( - Kind::ScalewayCr, - self.get_docker_envs(), - image.name.clone(), - image.tag.clone(), - registry_url, - event_details.clone(), - self.logger(), - ) - .is_ok() - } - - fn pull(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - let listeners_helper = ListenersHelper::new(&self.listeners); - - let mut image = image.clone(); - let registry_url: String; - - match self.get_or_create_registry_namespace(&image) { - Ok(registry) => { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Scaleway registry namespace for {} has been created", - image.name.as_str() - )), - ), - ); - image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name - image.registry_url = registry.endpoint.clone(); - image.registry_secret = Some(self.secret_token.clone()); - image.registry_docker_json_config = Some(self.get_docker_json_config_raw()); - registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string()); - } - Err(e) => { - self.logger.log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); - return Err(e); - } - } - - if !self.does_image_exists(&image) { - let info_message = format!("Image {:?} does not exist in SCR {} repository", image, self.name()); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from SCR {} repository", image, self.name()); - - 
self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login(®istry_url)?; - - let dest = format!("{}/{}", registry_url, image.name_with_tag()); - - // pull image - self.pull_image(dest, &image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let event_details = self.get_event_details(); - let mut image = image.clone(); - let registry_url: String; - let registry_name: String; - - match self.get_or_create_registry_namespace(&image) { - Ok(registry) => { - image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name - image.registry_url = registry.endpoint.clone(); - image.registry_secret = Some(self.secret_token.clone()); - image.registry_docker_json_config = Some(self.get_docker_json_config_raw()); - registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string()); - registry_name = registry.name.unwrap(); - } - Err(e) => { - self.logger.log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); - return Err(e); - } - } - - let _ = self.exec_docker_login(®istry_url)?; - - let dest = format!("{}/{}", registry_url, image.name_with_tag()); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(&image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {} found on Scaleway {} repository, container build is not required", - image, registry_name, - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - 
listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PushResult { image: image.clone() }); - } - - let info_message = format!( - "image {} does not exist on Scaleway {} repository, starting image upload", - image, - self.name() - ); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(info_message.to_string()), - ), - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}/{}:latest", registry_url, image.name); - self.push_image(dest, dest_latest_tag, &image) - } - - fn push_error(&self, image: &Image) -> Result { - Ok(PushResult { image: image.clone() }) + let image = docker::ContainerImage { + registry: info.endpoint, + name: image.name().clone(), + tags: vec![image.tag.clone()], + }; + self.docker.does_image_exist_remotely(&image).is_ok() } fn logger(&self) -> &dyn Logger { diff --git a/src/errors/io.rs b/src/errors/io.rs index 529cc9f1..17c63858 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -96,6 +96,7 @@ pub enum Tag { BuilderBuildpackCannotBuildContainerImage, BuilderGetBuildError, BuilderCloningRepositoryError, + DockerError, DockerPushImageError, DockerPullImageError, BuilderDockerCannotListImages, @@ -206,6 +207,7 @@ impl From for Tag { errors::Tag::ContainerRegistryRepositoryDoesntExist => Tag::ContainerRegistryRepositoryDoesntExist, errors::Tag::ContainerRegistryDeleteRepositoryError => Tag::ContainerRegistryDeleteRepositoryError, errors::Tag::BuilderDockerCannotListImages => Tag::BuilderDockerCannotListImages, + errors::Tag::DockerError => Tag::DockerError, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs 
index a409ae8f..b6531c75 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -4,6 +4,7 @@ extern crate url; use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd; +use crate::cmd::docker::DockerError; use crate::cmd::helm::HelmError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; @@ -253,6 +254,8 @@ pub enum Tag { BuilderGetBuildError, /// BuilderCloningRepositoryError: represents an error when builder is trying to clone a git repository. BuilderCloningRepositoryError, + /// DockerError: represents an error when trying to use docker cli. + DockerError, /// DockerPushImageError: represents an error when trying to push a docker image. DockerPushImageError, /// DockerPullImageError: represents an error when trying to pull a docker image. @@ -2286,6 +2289,24 @@ impl EngineError { ) } + /// Creates new error from an Docker error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message. + pub fn new_docker_error(event_details: EventDetails, error: DockerError) -> EngineError { + EngineError::new( + event_details, + Tag::DockerError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + /// Creates new error when trying to push a Docker image. 
/// /// Arguments: diff --git a/src/git.rs b/src/git.rs index 4222e887..0176fc9d 100644 --- a/src/git.rs +++ b/src/git.rs @@ -313,7 +313,7 @@ mod tests { fn test_git_submodule_with_ssh_key() { // Unique Key only valid for the submodule and in read access only // https://github.com/Qovery/dumb-logger/settings/keys - let ssh_key = String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0NCmIzQmxibk56YUMxclpYa3RkakVBQUFBQUJHNXZibVVBQUFBRWJtOXVaUUFBQUFBQUFBQUJBQUFCbHdBQUFBZHpjMmd0Y24NCk5oQUFBQUF3RUFBUUFBQVlFQTFGcS95ZGF6dU84T3ZRdjVUNEdxbndOMjhZV0EzaXlqanREMFdSQXhtdDZEV3lJRlVYZ1gNClZFZ1ZVYnZyYndKNGJQa0tTbkdqd1hZRUdJYkdYa0hKUTdvWTVSMnB6b1hqUkVYTzIzZEZ2aVp4bUpOcVdEVVJqSHhjc1INCndOYWxiOFVZZVBCRVI4TEQzWWpQd0lYNXdCWm5VSjZLWTJFbXhjSlBVUnV4bUlyTjI4QndiZ3FiejJPU3NJdWg4a1ZwSngNCldheitFc3JNM282NHpHMm0wa0dxMVI1VHE0enBPRWliUk1iY1ZXTldKUzRZR29JczdsRzB0ZHZndktNRnJsWktzSUw1Y2ENCkFOQzRXTlROMm1DVVFrVGpGSDVySDlDa0ZBZjZaZ0lqYklvN0s3TTc0L1B5RVhEcStyRW5vRWdzeEkzRi9NZHMydGM2RWkNClJaY2JrUmRLVnpaUzJCMXdKNDhrOGR3Sml5VytKSWY4ejEzK2FiUXVPNGR5MWRnM2gwbEZ6dm9qaVYxTjNBRXdHcmhjZEUNClo3TXNaeThKM3JvRElZSWZCczdkbmh2T1FrME1taEpKSEpMaVlEZWZCYUk4MVdGTGlqekUxejhqMG90cExlNkt0SVhQYk8NCmV5WWdod0U2aDlhSmNrOEU3WklYMjc4MGRQMW93T2g1dC9VaE0vdjFBQUFGZ082eU9GenVzamhjQUFBQUIzTnphQzF5YzINCkVBQUFHQkFOUmF2OG5XczdqdkRyMEwrVStCcXA4RGR2R0ZnTjRzbzQ3UTlGa1FNWnJlZzFzaUJWRjRGMVJJRlZHNzYyOEMNCmVHejVDa3B4bzhGMkJCaUd4bDVCeVVPNkdPVWRxYzZGNDBSRnp0dDNSYjRtY1ppVGFsZzFFWXg4WExFY0RXcFcvRkdIancNClJFZkN3OTJJejhDRitjQVdaMUNlaW1OaEpzWENUMUVic1ppS3pkdkFjRzRLbTg5amtyQ0xvZkpGYVNjVm1zL2hMS3pONk8NCnVNeHRwdEpCcXRVZVU2dU02VGhJbTBURzNGVmpWaVV1R0JxQ0xPNVJ0TFhiNEx5akJhNVdTckNDK1hHZ0RRdUZqVXpkcGcNCmxFSkU0eFIrYXgvUXBCUUgrbVlDSTJ5S095dXpPK1B6OGhGdzZ2cXhKNkJJTE1TTnhmekhiTnJYT2hJa1dYRzVFWFNsYzINClV0Z2RjQ2VQSlBIY0NZc2x2aVNIL005ZC9tbTBManVIY3RYWU40ZEpSYzc2STRsZFRkd0JNQnE0WEhSR2V6TEdjdkNkNjYNCkF5R0NId2JPM1o0YnprSk5ESm9TU1J5UzRtQTNud1dpUE5WaFM0bzh4TmMvSTlLTGFTM3VpclNGejJ6bnNtSUljQk9vZlcNCmlYSlBCTzJTRjl1L05IVDlhTURvZWJmMUlUUDc5UUFBQUFNQkF
BRUFBQUdCQUxhR1pqRkwvV0NwQWtjV0lxM25LMHZRZzQNCjBuamxQcGxKQXVKTWprOVc1RGNpNkQrSVJGTC9BK29TeUcxTit2Qk9uTnliMmhIZnNzd0dxQWRjTVEwcmtISFZ6WitWbk4NCmxVSGFxdW5UQkR4aitPSUhXN0lEczFqSWtEZWZnQngyTmh5eDR3anRBTHBhVW1ja1B1SkhTcURSV3JvQkc1c01Uc3RwWmwNCnNtb0diTmxFK0o1dE9lMnhqYVYzNzdRNVd4L0FIemd0T09RemZNL3lTZjMzTDhCS1Y0a3J4eXV3ZW95T1Q5OU9ia0ltaUUNCnpTMEQxVERuUStmSTNjdm1aL3lvcDZ0clA0a01wdWtWdC93ZUhFWU5nZkdPdHVHMndwU3oyRmpNcUcyT1NFd3ZpRXM3U0YNCmlwTGNWc2dpUzg3ckI5ZFBRejFYTGhhdW9MTDliY3BlOE9sZW50VkI5VHFaU1lqaTJoeUNtZG5id25CS2QyMGVaUlh0S3QNCnh3SUpDdkpESGwyWk9wTVVUcnIydFcwSkVFZU1QSDJWMCs4amg3aGxlQ0NLcDhmdE1pcGVuWTdvelR1M1JVTUdNcjB4eTINCmhUalVJNkVGU0ppVGlKVE9ibGVhcGVPMVE1czdHaU5ibmdZQXFhN3h3RmJuYllrODJ3ekxPbzdEUjYzODhJbzVQcEFRQUENCkFNQUtXbURSMWU5bXlncm8wZmtQUDQ3dGsxMnF5bWpkQzVtRU1SNm9TOTNMbGRaK1ptKzBxVlBxN1BSQ3JPZlpLcFJSQ1UNCmJOUkM0ZFJhUHk0ek85cEdqdzE3ZlhjUGxGQzRaQUN1anhnRzhvazdYNEdGVlZEQ2lySFRySFhWN0ozNUtPMnR5MloyR2UNCms2L0dhMUpCMlBLN0tJZFlnMWpjY3lUR0FsZTlmcjIyU21nZHVoUmt2WlZsVU9mMHp2ZDhERzlVcktYUURWTERHd1QrWlkNClp2ODhYdGduZzZneU1jZXhZaHZZY04yMUo4ay9wNmM1ZGVuUXNNL0QxN0Qyck9iNE1BQUFEQkFPcDBJWitTVWxXY0xzbjMNCmVwQk1pTVAwdm5LUTI4UUd4NDl1bW14VXdhMTI0djk5YzhtTXZ5TXJPYnFsODdjZjQwWTlqdUhsSGZKSzd0MXhNdE5qU3QNCkJWRlNjU2E5Sk56S0hKRTJaYlJma1d1ZXpScytGbytKcjU0YVppQjNvcjNFeUtaamNZY2RFTG5ROHNjNmJXd25Ic29WSHkNCmNpTThtcUhudHRqeXJPZFdJRi9CTURlYjF5WkliYlQ0aWN3Y1N2TEJOVE95dllwakg1RWNsTXdXcWlsQ2NxVVJyTmtZVXMNCnJWZkFabDZuUmE5N0FNNDd6THhBT0RZT1FzbjZhdk5RQUFBTUVBNTk2ejRYZkxrQ09MT3drUi85NS90WEYzS3p4MjFsdC8NCllBVExmRlBKbHdNaGRxN1d2VG9LZWxNV0QwNUxXYlZxYitNOGU3SWZSQlducEp0V1RxMVBCY3ltT2k1TkprSmZnWWhqdGgNCjlqT1k4WTVCWWlvcENRUUFtTWc3SHF3a0xUSUdUU25IdDN5ZGFTK21TaVFTQUhLb1VKbmp4cEdLQ3ZyVGk5eHdxTFpZT1YNClZvOHFCZ003M1c1TWUyQWI0YnpPaEt4Tm9iTFpqWkxqZDJoeHRyWENJaityRXVRa09NT1hGTmR6NkFDR0hwQ09KTGp4clUNCmk4TGNwd2c5NlpWZkhCQUFBQUNtVnlaV0psUUhOMGVYZz0NCi0tLS0tRU5EIE9QRU5TU0ggUFJJVkFURSBLRVktLS0tLQ==").unwrap()).unwrap(); + let ssh_key = 
String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFNd0FBQUF0emMyZ3RaVwpReU5UVXhPUUFBQUNBTzZlaGNrV0JrNlcwd3lTZ0FIY0dSY3JneW1IVThqRWVKRm5yQ2k1ZjZaQUFBQUpERlV0TVZ4VkxUCkZRQUFBQXR6YzJndFpXUXlOVFV4T1FBQUFDQU82ZWhja1dCazZXMHd5U2dBSGNHUmNyZ3ltSFU4akVlSkZuckNpNWY2WkEKQUFBRUQ0aGwvTmk0aGgvK3oxUm4wdWtMcm5mQ0xrN1BUWmErbVNQYk01ZS9aS0pnN3A2RnlSWUdUcGJUREpLQUFkd1pGeQp1REtZZFR5TVI0a1dlc0tMbC9wa0FBQUFDbVZ5WldKbFFITjBlWGdCQWdNPQotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K").unwrap()).unwrap(); let invalid_ssh_key = String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQ21GbGN6STFOaTFqZEhJQUFBQUdZbU55ZVhCMEFBQUFHQUFBQUJCNzZzbWIzVgp5WFB3SE12dm8zWTB5M0FBQUFFQUFBQUFFQUFBR1hBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDOVZHbm13cjZCClRHdWxzODhEaXRXaE5IUUoxMjV0eGxHa2EzNDNxUVB2S3dSc2VxN05SdFAzY2IxbDRMZytzdWozZ0lQYU5yM295SlBoRDIKZmIxbzF1cUFiOStkbWhwQXc4L1lCa05NZkRrdDRTWEpGZjZ3dUZwa1p4SHF3czNZUXF6cjhicVJaaHA0bXlnc2VwNFVHOApBaGxVMG5CUXFBREFhS3dBcmpLeUdBeWwwenRDYVdObm9sOVRZSmZuNEpOQW5YUDFONmMxMUVaRm5wKzJsMTVoSVdNd2NKClpCMnFFeTFSZzFVNXpuOVNSOURIVXhvN2p0ZkkrdWJWbHdnelBQaDVjZzAydVc0K0JwcFg1UGlpZ04rQlBNajc3WEJ0VTQKZzU3MmRDZHBSRjk3NjJ5SDBsY21nSkRqVnhnOTludVVGRDlwVG9nUTRrUENrdUluNmcxS3JObFdqY1R2c1hFS2JVS0xqawpkQkR2Yk1tbzZBaHJXRFhDSjZqRUN0T2Jka29XMGVjTGU4cXB3Nmh5N1NmdWppSm9QbnVsazRWenMwR2xPa3VPU0JIUmhJClhSc25NaFNiNnh2dDl6QldJcklvZDZoWnhuQ0V2SWRESzlacVBnOXJpbXc4bG8rUkFwdm1ySnRINUhsbFJiYWh4K2RUU1cKM2hCa1BlMnNDL1UvRUFBQVdBVXBEOTFIQTAzSnQyNFFSSFVXRDAvVTJGMTBzZE5WN0w4bkhMeVNibFBnSFhMc3lpSTFxOQo0NXBOUEQyNElBakNzQ08rVHREcXc3MDhlNXliUWhXUCsybkxtdGQwclEyTXh3SnZwUjlGcEV6UDFyejRYUDVUbzZDN3N1CmZpd0JPZWd6bjhQT1hGSmRvRk9Ud3E3dWhaM201NE93NHZvZkFKSHdtYWtwTGZMd2R1TnQ3S1RNQkVpT3VlM0ZXTGtCR0wKQUE1RGtoYVlpVGgyajB2YU9jUWhxZVphVEp6V2tidUcvb29DK1cwcTVXcFNZdFlxREFhWEh0bG8rZGtOMFEzZVVhcm1FTQpGcy9tdEpha3dhOVhCMVgzMndKbUpIdmN0OG4vVzA1T0N5V0U1Y2szeitRQVB3a2pGK0hKOGlOZDl
uVk5zckx1T010a2VQCk1aMTZreTg5WUVSZVQ1QXRJU1lRd0JQU2tsTFZKL3VaOCszK2Vyc3JrOW1aakw3ZXpISnV4ZysxUmR1T3BPeWpXMTRoTGYKblJQTDlKOXgvZWZ2MFV0L3BpR3M5NEFRcFFVZnJFdXpjL1dmejRocUtzVUxnT0VnblZBWXpuSksyWHJGeTN4aWlKVkFVUQpZcm4xak9lU1oyTWV0cjJvd05VdVM3cEhGTHZIWURRWklURmxVaFlOYUx0ejV5WU9HTCtFbEVxQm4wT1FFenNESDhROEpFCk5jWGVxUjFRTE4rTUJaMFZqQ2Q3T0ExTGpXZVVrdjNMaFJER3lPS3RjWk5OeFl5MkgwRWlmYzIvRHpLMnlpcVRQWUdMbHYKOWhZTlZZcC8xOGxhUkFOL040MlVDMjRmS0hFZ2lYVTNnL3RCZkZmbEFBWThKSE9sQUJEdXFWYjJkWHZKdXFLeUJMUElqVQo5cVl5VXNOVXhWS2M2ZWh4VU4wcVlnTmV2Z0JmMXVSZkxCY2c3SjVJVDZQQ2dSa3lNenBRakY1RkhuM0J6SVMrb3ZFSnNaCk5LNklYbDJIY3FncExTWUFkTFZlZEZOUzlkVU01blpMdlJEMjkyc0FQWm5aaU91Z3pwSWNrMllFcXpscjc2NXlUakRJdWgKR3kvdFlBQ3FIZHV4S2pMdGc0OXpjZjdNN2xESGNuVEY1MlJsazEyR2x1emZGK1dhZDF3eUFKVnNyUmtqVFZYVHhnTEV6MQo4SzF0WUtVOWoyc3grUE1Vd0JxM3lQR2lTaEgydWp6em82SUc1cnVYSTAwZXVkT2t1NVVrSHhBVnJneUI1S0M2VFRMR1BYCnhQMFN5Zk12dXJycDdvMnhsK2dkSVc0c0dudEJ2V0RHRVFSY0RxbWdLV0tuNTNsbmg5U1Urcmh2UkdhRFJueENuYkNwUEUKTE82V0lKUXVPQm54bzhWcGU0R2JLc2NmSktKSzlZV2ZIOFEvYzBncnE0ZDh5ZmRwUG1uc3hHOEpoTFVuMEhpRFEzQytaMgpzU1RPeU85TDAySUZIdDdIUEY2OWRWR3c3M0pPU1FiL05GK2g5cGRVazBScGNRdGFaTm9TMHg2a3RCQXljK0o0VUpUYTliCkdENWRaSE1KVHBvcWFZUDV0dFlnMjlBQkpUUURMa0tnbWxWRGNtK28zRTN3cTlySWFXMlhpNDQrc3RnTVJVS1J5R041d1EKM2xTWjk1QXBpWFlpRkNONUVrWitUci96TDAraVdwUHRCRzlJZmlGbmlqVlVYUnpEWHZxeGE1QTQ1YUlNWDhad2U5ckxFdAphaVRaOUI5d2tVb0tYdXlDU3plQXhMTGU2aG8wLzBDbmhSR3NoVGg1UDd6aFA4bVExRGZMYlFCRU0zOHJMWlplMExVVVhZCkZpZkFXc3BFRDk2VjBMckhxRkd0Z0dzd1NQcWRBRzBPTDBWekRUbFRucDJVWDY0SEhjUzF2MUMyQnNxbllWbkJNL3p5aUYKQXhabDB4cGRPUVVuKzV2V2VHUXZsQkhGeU0vQmtXRVhMbjc1YVNQL3JwcnlZeGdOeWx2M2NiRWNYZXoyWXdLM2UrN1NnZAoxRzFZUVVtNStqNy90Q0x5aFluL1VjRzJhTHJNc3pRY1FoWTE4Sk9IOXF6a2FacWdYckFybnE0dWluT25sbFBKaGJ3ZTVrCmgvMmdyTlVqbEsrRHYxQ2dGZUVDcm9yRHo4L3ZxZW1QNXdVWWF5bFNWWVZ3UHM1bkxDQWUrVlNobFlIOXlNb3JwanNXc3MKYlg0UlAvVGd3TmNtRnBuZ21kTXppNmtIUXhSc2pUT3VxZ3Vsb01FUVZmQ3JkNGxBeWp3eVhRaEcrd2dWMXBuempCZlR4eQpZeFBrc1VGaTg3aEVkZ1RPZ2M5MHlNamVoVGhHOGRMWGEvd0NOU0hLZ1pBbFBZbWdLd2ZvcFlBMjQxdUlxR2J0WUt
qSTFSCnVHU2JqSU80dUVYbkJ5eWVZTnA3Z29iR2NVc1BGV0doY1FPV05QZnl5K1crQ0xhKzVpYkJCZEF2NStVdlZZUHFGMHhTNy8KUm1TbW9BPT0KLS0tLS1FTkQgT1BFTlNTSCBQUklWQVRFIEtFWS0tLS0t").unwrap()).unwrap(); let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_submodule".to_string()); let get_credentials = |user: &str| { diff --git a/src/models.rs b/src/models.rs index 8a88f4bb..128634f2 100644 --- a/src/models.rs +++ b/src/models.rs @@ -2,16 +2,15 @@ use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; use std::hash::Hash; use std::net::Ipv4Addr; -use std::path::Path; use std::str::FromStr; use std::sync::Arc; use chrono::{DateTime, Utc}; -use git2::{Cred, CredentialType, Error}; use itertools::Itertools; use rand::distributions::Alphanumeric; use rand::Rng; use serde::{Deserialize, Serialize}; +use url::Url; use crate::build_platform::{Build, BuildOptions, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::aws::databases::mongodb::MongoDB; @@ -22,7 +21,7 @@ use crate::cloud_provider::service::{DatabaseOptions, StatefulService, Stateless use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; -use crate::git; +use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::utilities::get_image_tag; @@ -102,17 +101,14 @@ impl Environment { pub fn to_qe_environment( &self, context: &Context, - built_applications: &Vec>, cloud_provider: &dyn CloudProvider, + container_registry: &ContainerRegistryInfo, logger: Box, ) -> crate::cloud_provider::environment::Environment { let applications = self .applications .iter() - .map(|x| match built_applications.iter().find(|y| x.id.as_str() == y.id()) { - Some(app) => x.to_stateless_service(context, app.image().clone(), cloud_provider, logger.clone()), - _ => x.to_stateless_service(context, x.to_image(), cloud_provider, logger.clone()), - }) + .map(|x| x.to_stateless_service(context, 
x.to_image(container_registry), cloud_provider, logger.clone())) .filter(|x| x.is_some()) .map(|x| x.unwrap()) .collect::>(); @@ -365,52 +361,24 @@ impl Application { } } - pub fn to_image(&self) -> Image { - self.to_image_with_commit(&self.commit_id) - } - - pub fn to_image_from_parent_commit

(&self, clone_repo_into_dir: P) -> Result, Error> - where - P: AsRef, - { - let parent_commit_id = git::get_parent_commit_id( - self.git_url.as_str(), - self.commit_id.as_str(), - clone_repo_into_dir, - &|_| match &self.git_credentials { - None => vec![], - Some(creds) => vec![( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(creds.login.as_str(), creds.access_token.as_str()).unwrap(), - )], - }, - )?; - - Ok(match parent_commit_id { - Some(id) => Some(self.to_image_with_commit(&id)), - None => None, - }) - } - - pub fn to_image_with_commit(&self, commit_id: &String) -> Image { + pub fn to_image(&self, cr_info: &ContainerRegistryInfo) -> Image { Image { application_id: self.id.clone(), - name: self.name.clone(), + name: (cr_info.get_image_name)(&self.name), tag: get_image_tag( &self.root_path, &self.dockerfile_path, &self.environment_vars, - commit_id, + &self.commit_id, ), commit_id: self.commit_id.clone(), - registry_name: None, - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, + registry_name: cr_info.registry_name.clone(), + registry_url: cr_info.endpoint.clone(), + registry_docker_json_config: cr_info.registry_docker_json_config.clone(), } } - pub fn to_build(&self) -> Build { + pub fn to_build(&self, registry_url: &ContainerRegistryInfo) -> Build { // Retrieve ssh keys from env variables const ENV_GIT_PREFIX: &str = "GIT_SSH_KEY"; let env_ssh_keys: Vec<(String, String)> = self @@ -471,7 +439,7 @@ impl Application { root_path: self.root_path.clone(), buildpack_language: self.buildpack_language.clone(), }, - image: self.to_image(), + image: self.to_image(registry_url), options: BuildOptions { environment_variables: self .environment_vars @@ -1159,7 +1127,7 @@ pub struct Context { workspace_root_dir: String, lib_root_dir: String, test_cluster: bool, - docker_host: Option, + docker_host: Option, features: Vec, metadata: Option, } @@ -1172,13 +1140,13 @@ pub enum Features { // trait used to reimplement clone 
without same fields // this trait is used for Context struct -pub trait Clone2 { +pub trait CloneForTest { fn clone_not_same_execution_id(&self) -> Self; } // for test we need to clone context but to change the directory workspace used // to to this we just have to suffix the execution id in tests -impl Clone2 for Context { +impl CloneForTest for Context { fn clone_not_same_execution_id(&self) -> Context { let mut new = self.clone(); let suffix = rand::thread_rng() @@ -1199,7 +1167,7 @@ impl Context { workspace_root_dir: String, lib_root_dir: String, test_cluster: bool, - docker_host: Option, + docker_host: Option, features: Vec, metadata: Option, ) -> Self { @@ -1236,8 +1204,8 @@ impl Context { self.lib_root_dir.as_str() } - pub fn docker_tcp_socket(&self) -> Option<&String> { - self.docker_host.as_ref() + pub fn docker_tcp_socket(&self) -> &Option { + &self.docker_host } pub fn metadata(&self) -> Option<&Metadata> { @@ -1276,16 +1244,6 @@ impl Context { } } - pub fn docker_build_options(&self) -> Option> { - match &self.metadata { - Some(meta) => meta - .docker_build_options - .clone() - .map(|b| b.split(' ').map(|x| x.to_string()).collect()), - _ => None, - } - } - // Qovery features pub fn is_feature_enabled(&self, name: &Features) -> bool { for feature in &self.features { @@ -1303,7 +1261,6 @@ impl Context { pub struct Metadata { pub dry_run_deploy: Option, pub resource_expiration_in_seconds: Option, - pub docker_build_options: Option, pub forced_upgrade: Option, pub disable_pleco: Option, } @@ -1312,14 +1269,12 @@ impl Metadata { pub fn new( dry_run_deploy: Option, resource_expiration_in_seconds: Option, - docker_build_options: Option, forced_upgrade: Option, disable_pleco: Option, ) -> Self { Metadata { dry_run_deploy, resource_expiration_in_seconds, - docker_build_options, forced_upgrade, disable_pleco, } diff --git a/src/transaction.rs b/src/transaction.rs index 82f3cb72..404b044f 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,10 +1,7 @@ 
-use std::collections::HashMap; use std::thread; -use crate::build_platform::BuildResult; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::{Application, Service}; -use crate::container_registry::PushResult; +use crate::cloud_provider::service::Service; use crate::engine::EngineConfig; use crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EventMessage}; @@ -102,129 +99,48 @@ impl<'a> Transaction<'a> { Ok(()) } - fn load_build_app_cache(&self, app: &crate::models::Application) -> Result<(), EngineError> { - let container_registry = self.engine.container_registry(); - let mut image = app.to_image(); - - image.tag = String::from("latest"); - // pull image from container registry - // FIXME: if one day we use something else than LocalDocker to build image - // FIXME: we'll need to send the PullResult to the Build implementation - let _ = match container_registry.pull(&image) { - Ok(pull_result) => pull_result, - Err(err) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - err.clone(), - Some(EventMessage::new_from_safe( - "Something goes wrong while pulling image from container registry".to_string(), - )), - ), - ); - - return Err(err); - } - }; - - Ok(()) - } - - fn build_applications( + fn build_and_push_applications( &self, environment: &Environment, option: &DeploymentOption, - ) -> Result>, EngineError> { + ) -> Result<(), EngineError> { // do the same for applications let apps_to_build = environment .applications .iter() // build only applications that are set with Action: Create - .filter(|app| app.action == Action::Create); - - let application_and_result_tuples = apps_to_build - .map(|app| { - let image = app.to_image(); - let build_result = if option.force_build || !self.engine.container_registry().does_image_exists(&image) - { - // If an error occurred we can skip it. It's not critical. 
- let _ = self.load_build_app_cache(app); - - // only if the build is forced OR if the image does not exist in the registry - self.engine - .build_platform() - .build(app.to_build(), option.force_build, &self.is_transaction_aborted) - } else { - // use the cache - Ok(BuildResult::new(app.to_build())) - }; - - (app, build_result) - }) + .filter(|app| app.action == Action::Create) .collect::>(); - let mut applications: Vec> = Vec::with_capacity(application_and_result_tuples.len()); - for (application, result) in application_and_result_tuples { - // catch build error, can't do it in Fn - let build_result = match result { - Err(err) => { - error!("build error for application {}: {:?}", application.id.as_str(), err); - return Err(err); - } - Ok(build_result) => build_result, - }; - - if let Some(app) = application.to_application( - self.engine.context(), - &build_result.build.image, - self.engine.cloud_provider(), - self.logger.clone(), - ) { - applications.push(app) - } + // If nothing to build, do nothing + if apps_to_build.is_empty() { + return Ok(()); } - Ok(applications) - } + // Do setup of registry and be sure we are login to the registry + let cr_registry = self.engine.container_registry(); + let _ = cr_registry.create_registry()?; + let registry = self.engine.container_registry().login()?; - fn push_applications( - &self, - applications: Vec>, - option: &DeploymentOption, - ) -> Result, PushResult)>, EngineError> { - let application_and_push_results: Vec<_> = applications - .into_iter() - .map(|mut app| { - match self.engine.container_registry().push(app.image(), option.force_push) { - Ok(push_result) => { - // I am not a big fan of doing that but it's the most effective way - app.set_image(push_result.image.clone()); - Ok((app, push_result)) - } - Err(err) => Err(err), - } - }) - .collect(); + for app in apps_to_build.into_iter() { + let app_build = app.to_build(®istry); - let mut results: Vec<(Box, PushResult)> = vec![]; - for result in 
application_and_push_results.into_iter() { - match result { - Ok(tuple) => results.push(tuple), - Err(err) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - err.clone(), - Some(EventMessage::new_from_safe("Error pushing docker image".to_string())), - ), - ); - - return Err(err); - } + // If image already exist in the registry, skip the build + if !option.force_build && cr_registry.does_image_exists(&app_build.image) { + continue; } + + // Be sure that our repository exist before trying to pull/push images from it + let _ = self.engine.container_registry().create_repository(&app.name)?; + + // Ok now everything is setup, we can try to build the app + let _ = self + .engine + .build_platform() + .build(app_build, &self.is_transaction_aborted)?; } - Ok(results) + Ok(()) } pub fn rollback(&self) -> Result<(), RollbackError> { @@ -269,63 +185,11 @@ impl<'a> Transaction<'a> { /// This function is a wrapper to correctly revert all changes of an attempted deployment AND /// if a failover environment is provided, then rollback. 
- fn rollback_environment(&self, environment_action: &EnvironmentAction) -> Result<(), RollbackError> { - let qe_environment = |environment: &Environment| { - let mut _applications = Vec::with_capacity(environment.applications.len()); - for application in environment.applications.iter() { - let build = application.to_build(); - - if let Some(x) = application.to_application( - self.engine.context(), - &build.image, - self.engine.cloud_provider(), - self.logger.clone(), - ) { - _applications.push(x) - } - } - - let qe_environment = environment.to_qe_environment( - self.engine.context(), - &_applications, - self.engine.cloud_provider(), - self.logger.clone(), - ); - - qe_environment - }; - - match environment_action { - EnvironmentAction::Environment(te) => { - // revert changes but there is no failover environment - let target_qe_environment = qe_environment(te); - - let action = match te.action { - Action::Create => self - .engine - .kubernetes() - .deploy_environment_error(&target_qe_environment), - Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), - Action::Delete => self - .engine - .kubernetes() - .delete_environment_error(&target_qe_environment), - Action::Nothing => Ok(()), - }; - - let _ = match action { - Ok(_) => {} - Err(err) => return Err(RollbackError::CommitError(err)), - }; - - Err(RollbackError::NoFailoverEnvironment) - } - } + fn rollback_environment(&self, _environment_action: &EnvironmentAction) -> Result<(), RollbackError> { + Ok(()) } pub fn commit(mut self) -> TransactionResult { - let mut applications_by_environment: HashMap<&Environment, Vec>> = HashMap::new(); - for step in self.steps.clone().into_iter() { // execution loop self.executed_steps.push(step.clone()); @@ -372,7 +236,7 @@ impl<'a> Transaction<'a> { EnvironmentAction::Environment(te) => te, }; - let applications_builds = match self.build_applications(target_environment, &option) { + match self.build_and_push_applications(target_environment, 
&option) { Ok(apps) => apps, Err(engine_err) => { self.logger.log( @@ -392,30 +256,6 @@ impl<'a> Transaction<'a> { }; } }; - - if (self.is_transaction_aborted)() { - return TransactionResult::Canceled; - } - - let applications = match self.push_applications(applications_builds, &option) { - Ok(results) => { - let applications = results.into_iter().map(|(app, _)| app).collect::>(); - - applications - } - Err(engine_err) => { - warn!("ROLLBACK STARTED! an error occurred {:?}", engine_err); - return match self.rollback() { - Ok(_) => TransactionResult::Rollback(engine_err), - Err(err) => { - error!("ROLLBACK FAILED! fatal error: {:?}", err); - TransactionResult::UnrecoverableError(engine_err, err) - } - }; - } - }; - - applications_by_environment.insert(target_environment, applications); } Step::DeployEnvironment(environment_action) => { if (self.is_transaction_aborted)() { @@ -423,7 +263,7 @@ impl<'a> Transaction<'a> { } // deploy complete environment - match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { + match self.commit_environment(environment_action, |qe_env| { self.engine.kubernetes().deploy_environment(qe_env) }) { TransactionResult::Ok => {} @@ -439,7 +279,7 @@ impl<'a> Transaction<'a> { } // pause complete environment - match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { + match self.commit_environment(environment_action, |qe_env| { self.engine.kubernetes().pause_environment(qe_env) }) { TransactionResult::Ok => {} @@ -455,7 +295,7 @@ impl<'a> Transaction<'a> { } // delete complete environment - match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { + match self.commit_environment(environment_action, |qe_env| { self.engine.kubernetes().delete_environment(qe_env) }) { TransactionResult::Ok => {} @@ -534,12 +374,7 @@ impl<'a> Transaction<'a> { } } - fn commit_environment( - &self, - environment_action: &EnvironmentAction, - 
applications_by_environment: &HashMap<&Environment, Vec>>, - action_fn: F, - ) -> TransactionResult + fn commit_environment(&self, environment_action: &EnvironmentAction, action_fn: F) -> TransactionResult where F: Fn(&crate::cloud_provider::environment::Environment) -> Result<(), EngineError>, { @@ -547,16 +382,11 @@ impl<'a> Transaction<'a> { EnvironmentAction::Environment(te) => te, }; - let empty_vec = Vec::with_capacity(0); - let built_applications = match applications_by_environment.get(target_environment) { - Some(applications) => applications, - None => &empty_vec, - }; - + let registry_info = self.engine.container_registry().login().unwrap(); let qe_environment = target_environment.to_qe_environment( self.engine.context(), - built_applications, self.engine.cloud_provider(), + ®istry_info, self.logger.clone(), ); diff --git a/test_utilities/Cargo.lock b/test_utilities/Cargo.lock index f794d3af..2b2e33bf 100644 --- a/test_utilities/Cargo.lock +++ b/test_utilities/Cargo.lock @@ -2147,6 +2147,7 @@ dependencies = [ "tracing-subscriber", "trust-dns-resolver", "url 2.2.2", + "urlencoding", "uuid 0.8.2", "walkdir", ] @@ -3321,6 +3322,7 @@ dependencies = [ "time 0.2.24", "tracing", "tracing-subscriber", + "url 2.2.2", "uuid 0.8.2", ] @@ -3957,6 +3959,12 @@ dependencies = [ "url 1.7.2", ] +[[package]] +name = "urlencoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" + [[package]] name = "uuid" version = "0.7.4" diff --git a/test_utilities/Cargo.toml b/test_utilities/Cargo.toml index f777f2d0..01e81fbd 100644 --- a/test_utilities/Cargo.toml +++ b/test_utilities/Cargo.toml @@ -28,6 +28,7 @@ hashicorp_vault = "2.0.1" maplit = "1.0.2" uuid = { version = "0.8", features = ["v4"] } const_format = "0.2.22" +url = "2.2.2" # Digital Ocean Deps digitalocean = "0.1.1" diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 77871e97..8276d917 
100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -9,7 +9,6 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; use qovery_engine::cloud_provider::Kind::Aws; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; -use qovery_engine::container_registry::docker_hub::DockerHub; use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; @@ -54,17 +53,6 @@ pub fn container_registry_ecr(context: &Context) -> ECR { ) } -pub fn container_registry_docker_hub(context: &Context) -> DockerHub { - DockerHub::new( - context.clone(), - "my-docker-hub-id-123", - "my-default-docker-hub", - "qoveryrd", - "3b9481fe-74e7-4d7b-bc08-e147c9fd4f24", - logger(), - ) -} - pub fn aws_default_engine_config(context: &Context, logger: Box) -> EngineConfig { AWS::docker_cr_engine( &context, @@ -75,7 +63,6 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> None, ) } - impl Cluster for AWS { fn docker_cr_engine( context: &Context, diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 6bfb7081..029ec8db 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -6,7 +6,7 @@ use chrono::Utc; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::models::{ - Action, Application, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, + Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 31d653d6..656db11e 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -1,5 +1,4 @@ use 
const_format::formatcp; -use qovery_engine::build_platform::Image; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; @@ -37,7 +36,7 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { DOCR::new( context.clone(), DOCR_ID, - "default-docr-registry-qovery-do-test", + DOCR_ID, secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), logger(), ) @@ -163,11 +162,11 @@ impl Cluster for DO { pub fn clean_environments( context: &Context, - environments: Vec, + _environments: Vec, secrets: FuncTestsSecrets, _region: DoRegion, ) -> Result<(), EngineError> { - let do_cr = DOCR::new( + let _do_cr = DOCR::new( context.clone(), "test", "test", @@ -178,14 +177,23 @@ pub fn clean_environments( logger(), ); + // FIXME: re-enable it, or let pleco do its job ? + /* // delete images created in registry + let registry_url = do_cr.login()?; for env in environments.iter() { - for image in env.applications.iter().map(|a| a.to_image()).collect::>() { - if let Err(e) = do_cr.delete_image(&image) { - return Err(e); - } + for image in env + .applications + .iter() + .map(|a| a.to_image(®istry_url)) + .collect::>() + { + //if let Err(e) = do_cr.delete_registry(&image.name) { + // return Err(e); + //} } } + */ Ok(()) } diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index ce61d38e..c6149d54 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -18,6 +18,7 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Scw; +use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::errors::EngineError; use 
qovery_engine::logger::Logger; @@ -239,8 +240,14 @@ pub fn clean_environments( ); // delete images created in registry + let registry_url = container_registry_client.login()?; for env in environments.iter() { - for image in env.applications.iter().map(|a| a.to_image()).collect::>() { + for image in env + .applications + .iter() + .map(|a| a.to_image(®istry_url)) + .collect::>() + { if let Err(e) = container_registry_client.delete_image(&image) { return Err(e); } diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index ca06c1d8..7c8ab263 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -80,7 +80,6 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { None => Some(7200), } }, - docker_build_options: Some("--network host".to_string()), forced_upgrade: Option::from({ match env::var_os("forced_upgrade") { Some(_) => true, @@ -363,7 +362,7 @@ impl FuncTestsSecrets { } pub fn build_platform_local_docker(context: &Context, logger: Box) -> LocalDocker { - LocalDocker::new(context.clone(), "oxqlm3r99vwcmvuj", "qovery-local-docker", logger) + LocalDocker::new(context.clone(), "oxqlm3r99vwcmvuj", "qovery-local-docker", logger).unwrap() } pub fn init() -> Instant { diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 43f00166..c1262911 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -3,19 +3,18 @@ extern crate test_utilities; use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, + Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, }; -use test_utilities::aws::{aws_default_engine_config, AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; +use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; use 
self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TYPE}; use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; -use test_utilities::common::{test_db, Cluster, ClusterDomain, Infrastructure}; +use test_utilities::common::{test_db, Infrastructure}; /** ** diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index cc767323..aaed2d71 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -5,16 +5,13 @@ use self::test_utilities::utilities::{ engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; -use qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; -use test_utilities::aws::{aws_default_engine_config, container_registry_ecr, AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; -use test_utilities::utilities::{build_platform_local_docker, context, init, kubernetes_config_path}; +use test_utilities::aws::aws_default_engine_config; +use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; // TODO: @@ -75,98 +72,6 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { }) } -#[cfg(feature = "test-aws-self-hosted")] -#[named] -#[test] -fn 
test_build_cache() { - let test_name = function_name!(); - engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .AWS_TEST_ORGANIZATION_ID - .as_ref() - .expect("AWS_TEST_ORGANIZATION_ID is not set") - .as_str(), - secrets - .AWS_TEST_CLUSTER_ID - .as_ref() - .expect("AWS_TEST_CLUSTER_ID is not set") - .as_str(), - ); - let engine_config = aws_default_engine_config(&context, logger()); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let ecr = container_registry_ecr(&context); - let local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match ecr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match ecr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match ecr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - 
Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match ecr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); - }) -} - #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index fbcae223..c62d6439 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -7,9 +7,8 @@ use self::test_utilities::utilities::{ use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; -use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; +use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; -use std::borrow::Borrow; use std::str::FromStr; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 873cee85..a975987f 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -3,7 +3,7 @@ use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, + Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, }; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ @@ -11,11 +11,10 @@ use 
test_utilities::utilities::{ }; use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; -use test_utilities::common::{database_test_environment, test_db, working_minimal_environment, Infrastructure}; +use test_utilities::common::{database_test_environment, test_db, Infrastructure}; use test_utilities::digitalocean::{ - clean_environments, do_default_engine_config, DO_KUBERNETES_VERSION, DO_MANAGED_DATABASE_DISK_TYPE, - DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, - DO_TEST_REGION, + clean_environments, do_default_engine_config, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, + DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, DO_TEST_REGION, }; /** @@ -437,7 +436,6 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[ignore] #[named] #[test] -#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -454,7 +452,6 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { #[ignore] #[named] #[test] -#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 5f0e3c71..8d965c39 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -6,16 +6,13 @@ use self::test_utilities::utilities::{ engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; -use qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use 
qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; use test_utilities::common::Infrastructure; -use test_utilities::digitalocean::{container_registry_digital_ocean, do_default_engine_config, DO_KUBERNETES_VERSION}; -use test_utilities::utilities::{build_platform_local_docker, context}; +use test_utilities::digitalocean::do_default_engine_config; +use test_utilities::utilities::context; use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on DigitalOcean infrastructure. @@ -78,96 +75,6 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { }) } -#[cfg(feature = "test-do-self-hosted")] -#[named] -#[test] -fn test_build_cache() { - let test_name = function_name!(); - engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .DIGITAL_OCEAN_TEST_ORGANIZATION_ID - .as_ref() - .expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"), - secrets - .DIGITAL_OCEAN_TEST_CLUSTER_ID - .as_ref() - .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), - ); - let engine_config = do_default_engine_config(&context, logger()); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let docr = container_registry_digital_ocean(&context); - let local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - 
Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match docr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match docr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match docr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match docr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); - }) -} - #[cfg(feature = "test-do-self-hosted")] #[named] #[test] diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index 5a262127..b91fbcb7 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -2,9 +2,7 @@ extern crate test_utilities; use self::test_utilities::common::ClusterDomain; use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets, -}; 
+use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; diff --git a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs index 6f1a7dfd..c1aaf385 100644 --- a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs +++ b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs @@ -2,11 +2,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; -use qovery_engine::cloud_provider::digitalocean::DO; -use test_utilities::digitalocean::{do_default_engine_config, DO_KUBERNETES_VERSION, DO_TEST_REGION}; +use test_utilities::digitalocean::do_default_engine_config; use tracing::{span, Level}; -use self::test_utilities::common::Cluster; use qovery_engine::transaction::{Transaction, TransactionResult}; // Warning: This test shouldn't be ran by CI diff --git a/tests/docker/multi_stage_simple/Dockerfile.buildkit b/tests/docker/multi_stage_simple/Dockerfile.buildkit new file mode 100644 index 00000000..5219fdf1 --- /dev/null +++ b/tests/docker/multi_stage_simple/Dockerfile.buildkit @@ -0,0 +1,10 @@ +FROM golang:1.16 AS build + +# ../ is not valid if using old docker engine, only allowed with buildkit +COPY ../hello.go /go/src/project/hello.go +WORKDIR /go/src/project +RUN go build hello.go + +FROM scratch +COPY --from=build /go/src/project/hello /bin/hello +ENTRYPOINT ["/bin/hello"] diff --git a/tests/docker/multi_stage_simple/hello.go b/tests/docker/multi_stage_simple/hello.go new file mode 100644 index 00000000..a932edea --- /dev/null +++ b/tests/docker/multi_stage_simple/hello.go @@ -0,0 +1,7 @@ +package main + +import "fmt" + +func main() { + fmt.Println("hello world") +} diff --git 
a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index fce2203f..ce1f918a 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,7 +1,6 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; -use qovery_engine::build_platform::Image; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use test_utilities::utilities::logger; @@ -49,17 +48,7 @@ fn test_get_registry_namespace() { logger(), ); - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; - + let image = registry_name.to_string(); container_registry .create_registry_namespace(&image) .expect("error while creating registry namespace"); @@ -108,16 +97,7 @@ fn test_create_registry_namespace() { logger(), ); - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; + let image = registry_name.to_string(); // execute: debug!("test_create_registry_namespace - {}", region); @@ -160,17 +140,7 @@ fn test_delete_registry_namespace() { logger(), ); - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; - + let image = registry_name.to_string(); container_registry 
.create_registry_namespace(&image) .expect("error while creating registry namespace"); @@ -207,17 +177,7 @@ fn test_get_or_create_registry_namespace() { logger(), ); - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; - + let image = registry_name.to_string(); container_registry .create_registry_namespace(&image) .expect("error while creating registry namespace"); diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index acb7c597..e2f1ef13 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -3,7 +3,7 @@ use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, + Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, }; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ @@ -12,12 +12,11 @@ use test_utilities::utilities::{ }; use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use test_utilities::common::test_db; use test_utilities::common::{database_test_environment, Infrastructure}; -use test_utilities::common::{test_db, working_minimal_environment}; use test_utilities::scaleway::{ - clean_environments, scw_default_engine_config, SCW_KUBERNETES_VERSION, SCW_MANAGED_DATABASE_DISK_TYPE, - SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, - SCW_TEST_ZONE, + clean_environments, scw_default_engine_config, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, + SCW_SELF_HOSTED_DATABASE_DISK_TYPE, 
SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, SCW_TEST_ZONE, }; /** diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 7b329967..979bf888 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -6,16 +6,12 @@ use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; -use qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; use test_utilities::common::Infrastructure; -use test_utilities::scaleway::{container_registry_scw, scw_default_engine_config, SCW_KUBERNETES_VERSION}; -use test_utilities::utilities::build_platform_local_docker; +use test_utilities::scaleway::scw_default_engine_config; use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on Scaleway infrastructure. 
@@ -81,97 +77,6 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { }) } -#[cfg(feature = "test-scw-self-hosted")] -#[named] -#[test] -fn test_build_cache() { - let test_name = function_name!(); - engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .SCALEWAY_TEST_ORGANIZATION_ID - .as_ref() - .expect("SCALEWAY_TEST_ORGANIZATION_ID") - .as_str(), - secrets - .SCALEWAY_TEST_CLUSTER_ID - .as_ref() - .expect("SCALEWAY_TEST_CLUSTER_ID") - .as_str(), - ); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let scr = container_registry_scw(&context); - let local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match scr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match scr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match scr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match 
local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match scr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); - }) -} - #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 58e5c9a0..0e6075be 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -1,9 +1,7 @@ extern crate test_utilities; use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets, -}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::scaleway::application::ScwZone; diff --git a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs index 19b8753e..70816f19 100644 --- a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs +++ b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs @@ -2,11 +2,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; -use test_utilities::scaleway::{scw_default_engine_config, SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; +use 
test_utilities::scaleway::scw_default_engine_config; use tracing::{span, Level}; -use self::test_utilities::common::Cluster; -use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::transaction::{Transaction, TransactionResult}; // Warning: This test shouldn't be ran by CI From 1780ce6ce1ad0d6b15b38f13f49e6b4631c38b07 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 10:50:08 +0100 Subject: [PATCH 15/85] To trigger CI --- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8b137891..139597f9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1 +1,2 @@ + From 2de28a8d8423fb6299a6ed9341927c8a81b80d92 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 10:54:21 +0100 Subject: [PATCH 16/85] Dumb PR for tests --- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 139597f9..b28b04f6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,2 +1,3 @@ + From fc9fe1b7e5cb56802038d9ed9e6b56abd116bb6e Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 11:57:18 +0100 Subject: [PATCH 17/85] Fix for tests --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- src/cmd/docker.rs | 16 +++++++++++++++- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00a3bf2e..aefb2a9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -944,9 +944,9 @@ checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "git2" -version = "0.13.25" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" +checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" dependencies = [ "bitflags", "libc", @@ -1445,9 +1445,9 @@ checksum = "a7f823d141fe0a24df1e23b4af4e3c7ba9e5966ec514ea068c93024aa7deb765" [[package]] name = "libgit2-sys" -version = 
"0.12.26+1.3.0" +version = "0.13.2+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index abfed16b..533819ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] chrono = "0.4.19" cmd_lib = "1.0.13" -git2 = "0.13.25" +git2 = "0.14.2" walkdir = "2.3.2" itertools = "0.10.0" base64 = "0.13.0" diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 5765405b..62ad21b4 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -2,8 +2,10 @@ use crate::cmd::command::{CommandError, QoveryCommand}; use crate::errors::EngineError; use crate::events::EventDetails; use chrono::Duration; +use std::env; use std::path::Path; use std::process::ExitStatus; +use std::time::{SystemTime, UNIX_EPOCH}; use url::Url; #[derive(thiserror::Error, Debug)] @@ -99,11 +101,23 @@ impl Docker { // In order to be able to use --cache-from --cache-to for buildkit, // we need to create our specific builder, which is not the default one (aka: the docker one) + let builder_name = if env::var_os("CI").is_some() { + format!( + "qovery-engine-{}", + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("invalid timestamp") + .as_secs() + ) + } else { + "qovery-engine".to_string() + }; + let args = vec![ "buildx", "create", "--name", - "qovery-engine", + &builder_name, "--driver-opt", "network=host", "--use", ]; From f176cf3befdc615402155e89bab0e5ab10c0f7a8 Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Thu, 17 Mar 2022 12:07:50 +0100 Subject: [PATCH 18/85] chore: release pleco v0.11.1 (#646) --- lib/common/bootstrap/charts/pleco/Chart.yaml | 4 ++-- .../bootstrap/charts/pleco/templates/deployment.yaml | 9 +++++++++ lib/common/bootstrap/charts/pleco/values-aws.yaml | 3 +++ lib/common/bootstrap/charts/pleco/values.yaml | 2 +-
lib/helm-freeze.yaml | 2 +- 5 files changed, 16 insertions(+), 4 deletions(-) diff --git a/lib/common/bootstrap/charts/pleco/Chart.yaml b/lib/common/bootstrap/charts/pleco/Chart.yaml index 224f4ebd..47a0c5d9 100644 --- a/lib/common/bootstrap/charts/pleco/Chart.yaml +++ b/lib/common/bootstrap/charts/pleco/Chart.yaml @@ -1,9 +1,9 @@ apiVersion: v2 -appVersion: 0.10.4 +appVersion: 0.11.1 description: Automatically removes Cloud managed services and Kubernetes resources based on tags with TTL home: https://github.com/Qovery/pleco icon: https://github.com/Qovery/pleco/raw/main/assets/pleco_logo.png name: pleco type: application -version: 0.10.4 +version: 0.11.1 diff --git a/lib/common/bootstrap/charts/pleco/templates/deployment.yaml b/lib/common/bootstrap/charts/pleco/templates/deployment.yaml index 89f3b959..693d634d 100644 --- a/lib/common/bootstrap/charts/pleco/templates/deployment.yaml +++ b/lib/common/bootstrap/charts/pleco/templates/deployment.yaml @@ -94,6 +94,15 @@ spec: {{ if or (eq .Values.awsFeatures.ecr true)}} - --enable-ecr {{ end }} + {{ if or (eq .Values.awsFeatures.sfn true)}} + - --enable-sfn + {{ end }} + {{ if or (eq .Values.awsFeatures.sqs true)}} + - --enable-sqs + {{ end }} + {{ if or (eq .Values.awsFeatures.lambda true)}} + - --enable-lambda + {{ end }} {{- end }} # Scaleway features diff --git a/lib/common/bootstrap/charts/pleco/values-aws.yaml b/lib/common/bootstrap/charts/pleco/values-aws.yaml index 089e5ee5..23092d86 100644 --- a/lib/common/bootstrap/charts/pleco/values-aws.yaml +++ b/lib/common/bootstrap/charts/pleco/values-aws.yaml @@ -44,6 +44,9 @@ awsFeatures: iam: true sshKeys: true ecr: true + sfn: true + sqs: true + lambda: true resources: limits: diff --git a/lib/common/bootstrap/charts/pleco/values.yaml b/lib/common/bootstrap/charts/pleco/values.yaml index 09b4e135..322d352b 100644 --- a/lib/common/bootstrap/charts/pleco/values.yaml +++ b/lib/common/bootstrap/charts/pleco/values.yaml @@ -3,7 +3,7 @@ replicaCount: 1 image: repository: 
qoveryrd/pleco pullPolicy: IfNotPresent - plecoImageTag: "0.10.4" + plecoImageTag: "0.11.1" cloudProvider: "" diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 9035b942..37e35087 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -70,7 +70,7 @@ charts: dest: services no_sync: true - name: pleco - version: 0.10.4 + version: 0.11.1 repo_name: pleco - name: do-k8s-token-rotate version: 0.1.3 From 869194c085be7938faf8fac6267c2a5b50c5f20f Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 12:44:07 +0100 Subject: [PATCH 19/85] Fix for CI --- src/cmd/docker.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 62ad21b4..78c1e10c 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -2,10 +2,8 @@ use crate::cmd::command::{CommandError, QoveryCommand}; use crate::errors::EngineError; use crate::events::EventDetails; use chrono::Duration; -use std::env; use std::path::Path; use std::process::ExitStatus; -use std::time::{SystemTime, UNIX_EPOCH}; use url::Url; #[derive(thiserror::Error, Debug)] @@ -101,25 +99,14 @@ impl Docker { // In order to be able to use --cache-from --cache-to for buildkit, // we need to create our specific builder, which is not the default one (aka: the docker one) - let builder_name = if env::var_os("CI").is_some() { - format!( - "qovery-engine-{}", - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("invalid timestamp") - .as_secs() - ) - } else { - "qovery-engine".to_string() - }; - let args = vec![ "buildx", "create", "--name", - &builder_name, + "qovery-engine", "--driver-opt", "network=host", + "--bootstrap", "--use", ]; let _ = docker_exec( @@ -484,7 +471,7 @@ pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> Engi // start a local registry to run this test // docker run --rm -ti -p 5000:5000 --name registry registry:2 -#[cfg(feature = "test-with-docker")] +//#[cfg(feature = 
"test-with-docker")] #[cfg(test)] mod tests { use crate::cmd::docker::{ContainerImage, Docker, DockerError}; From 943ed771faabc6f54efe609d808d66ea936134ec Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 12:44:55 +0100 Subject: [PATCH 20/85] Fix for CI --- src/cmd/docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 78c1e10c..f1d8ebf1 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -471,7 +471,7 @@ pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> Engi // start a local registry to run this test // docker run --rm -ti -p 5000:5000 --name registry registry:2 -//#[cfg(feature = "test-with-docker")] +#[cfg(feature = "test-with-docker")] #[cfg(test)] mod tests { use crate::cmd::docker::{ContainerImage, Docker, DockerError}; From 6ae2186e4fae0fcfd18305b1a87351ae66fde08d Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 14:13:15 +0100 Subject: [PATCH 21/85] Fix test for CI --- src/container_registry/docr.rs | 2 ++ src/transaction.rs | 50 +++++++++++++++++++++++++++++++--- tests/aws/aws_kubernetes.rs | 6 +--- 3 files changed, 49 insertions(+), 9 deletions(-) diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 00d8060f..ff0c14ab 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -26,6 +26,7 @@ pub struct DOCR { pub name: String, pub api_key: String, pub id: String, + pub registry_info: Option, pub listeners: Listeners, pub logger: Box, } @@ -37,6 +38,7 @@ impl DOCR { name: name.into(), api_key: api_key.into(), id: id.into(), + registry_info: None, listeners: vec![], logger, } diff --git a/src/transaction.rs b/src/transaction.rs index 404b044f..a21b434f 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -183,10 +183,52 @@ impl<'a> Transaction<'a> { Ok(()) } - /// This function is a wrapper to correctly revert all changes of an attempted deployment AND - /// if a 
failover environment is provided, then rollback. - fn rollback_environment(&self, _environment_action: &EnvironmentAction) -> Result<(), RollbackError> { - Ok(()) + // Warning: This function does not revert anything, it is just there to grab info from kube and services if it fails + // FIXME: Cleanup this, qe_environment should not be rebuilt at this step + fn rollback_environment(&self, environment_action: &EnvironmentAction) -> Result<(), RollbackError> { + let registry_info = self + .engine + .container_registry() + .login() + .map_err(|err| RollbackError::CommitError(err))?; + + let qe_environment = |environment: &Environment| { + let qe_environment = environment.to_qe_environment( + self.engine.context(), + self.engine.cloud_provider(), + &registry_info, + self.logger.clone(), + ); + + qe_environment + }; + + match environment_action { + EnvironmentAction::Environment(te) => { + // revert changes but there is no failover environment + let target_qe_environment = qe_environment(te); + + let action = match te.action { + Action::Create => self + .engine + .kubernetes() + .deploy_environment_error(&target_qe_environment), + Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), + Action::Delete => self + .engine + .kubernetes() + .delete_environment_error(&target_qe_environment), + Action::Nothing => Ok(()), + }; + + let _ = match action { + Ok(_) => {} + Err(err) => return Err(RollbackError::CommitError(err)), + }; + + Err(RollbackError::NoFailoverEnvironment) + } + } } pub fn commit(mut self) -> TransactionResult { diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index c62d6439..53f790b8 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -1,9 +1,7 @@ extern crate test_utilities; use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id,
logger, FuncTestsSecrets, -}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; @@ -98,9 +96,7 @@ fn create_and_destroy_eks_cluster_in_us_east_2() { #[named] #[test] fn create_pause_and_destroy_eks_cluster_in_us_east_2() { - let secrets = FuncTestsSecrets::new(); let region = "us-east-2".to_string(); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); create_and_destroy_eks_cluster( region, ClusterTestType::WithPause, From 19686830c8f642a09e7bf71fc59caf1d1603b4b4 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 15:53:46 +0100 Subject: [PATCH 22/85] Fix sticky sessions tests --- tests/aws/aws_environment.rs | 4 ++++ tests/digitalocean/do_environment.rs | 4 ++++ tests/scaleway/scw_environment.rs | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index aaed2d71..540d482e 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -10,6 +10,8 @@ use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; +use std::thread; +use std::time::Duration; use test_utilities::aws::aws_default_engine_config; use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; @@ -948,6 +950,8 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); + // let time for nginx to reload the config + 
thread::sleep(Duration::from_secs(5)); // checking if cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 8d965c39..51ede847 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -10,6 +10,8 @@ use qovery_engine::cloud_provider::Kind; use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; +use std::thread; +use std::time::Duration; use test_utilities::common::Infrastructure; use test_utilities::digitalocean::do_default_engine_config; use test_utilities::utilities::context; @@ -833,6 +835,8 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); + // let time for nginx to reload the config + thread::sleep(Duration::from_secs(5)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 979bf888..76ccd061 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -10,6 +10,8 @@ use qovery_engine::cloud_provider::Kind; use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; +use std::thread; +use std::time::Duration; use test_utilities::common::Infrastructure; use test_utilities::scaleway::scw_default_engine_config; use tracing::{span, warn, Level}; @@ -953,6 +955,8 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { let result = 
environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); + // let time for nginx to reload the config + thread::sleep(Duration::from_secs(5)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); From 84f0e79e045205a1e4c44bfe9886ee9a03bb0c0f Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 16:36:13 +0100 Subject: [PATCH 23/85] Refacto to share the lambda and use it multiple time --- src/build_platform/local_docker.rs | 10 +-- .../aws/kubernetes/helm_charts.rs | 12 +-- .../digitalocean/kubernetes/cidr.rs | 15 ++-- src/cloud_provider/helm.rs | 12 +-- src/cmd/command.rs | 31 ++++--- src/cmd/docker.rs | 90 +++++++++---------- src/cmd/helm.rs | 41 +++++---- src/cmd/kubectl.rs | 74 +++++++-------- src/cmd/terraform.rs | 4 +- test_utilities/Cargo.lock | 8 +- 10 files changed, 152 insertions(+), 145 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 0079c5d0..47abd851 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -134,7 +134,7 @@ impl LocalDocker { .collect::>(), &image_cache, true, - |line| { + &mut |line| { self.logger.log( LogLevel::Info, EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), @@ -149,7 +149,7 @@ impl LocalDocker { self.context.execution_id(), )); }, - |line| { + &mut |line| { self.logger.log( LogLevel::Info, EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), @@ -278,7 +278,7 @@ impl LocalDocker { let mut cmd = QoveryCommand::new("pack", &buildpacks_args, &self.get_docker_host_envs()); exit_status = cmd.exec_with_abort( Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), - |line| { + &mut |line| { self.logger.log( LogLevel::Info, EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), @@ -293,7 +293,7 @@ 
impl LocalDocker { self.context.execution_id(), )); }, - |line| { + &mut |line| { self.logger.log( LogLevel::Warning, EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), @@ -682,7 +682,7 @@ fn docker_prune_images(envs: Vec<(&str, &str)>) -> Result<(), CommandError> { let mut errored_commands = vec![]; for prune in all_prunes_commands { let mut cmd = QoveryCommand::new("docker", &prune, &envs); - if let Err(e) = cmd.exec_with_timeout(Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), |_| {}, |_| {}) { + if let Err(e) = cmd.exec_with_timeout(Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), &mut |_| {}, &mut |_| {}) { errored_commands.push(format!("{} {:?}", prune[0], e)); } } diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index e81f342f..b2d4a3d7 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1266,8 +1266,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", stdout, out), + &mut |out| stderr = format!("{}\n{}", stderr, out), )?; let args = vec![ @@ -1285,8 +1285,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", stdout, out), + &mut |out| stderr = format!("{}\n{}", stderr, out), )?; let args = vec![ @@ -1304,8 +1304,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", stdout, out), + &mut |out| stderr = 
format!("{}\n{}", stderr, out), )?; Ok(()) diff --git a/src/cloud_provider/digitalocean/kubernetes/cidr.rs b/src/cloud_provider/digitalocean/kubernetes/cidr.rs index c5a46fbc..6cad8fd7 100644 --- a/src/cloud_provider/digitalocean/kubernetes/cidr.rs +++ b/src/cloud_provider/digitalocean/kubernetes/cidr.rs @@ -18,15 +18,12 @@ pub fn get_used_cidr_on_region(token: &str) { let mut output_from_cli = String::new(); let mut cmd = QoveryCommand::new("doctl", &vec!["vpcs", "list", "--output", "json", "-t", token], &vec![]); - let _ = cmd.exec_with_output( - |r_out| output_from_cli.push_str(&r_out), - |r_err| { - error!( - "DOCTL CLI error from cmd inserted, please check vpcs list command{}", - r_err - ) - }, - ); + let _ = cmd.exec_with_output(&mut |r_out| output_from_cli.push_str(&r_out), &mut |r_err| { + error!( + "DOCTL CLI error from cmd inserted, please check vpcs list command{}", + r_err + ) + }); let buff = output_from_cli.borrow(); let _array: Vec = serde_json::from_str(&buff).expect("JSON is not well-formatted"); diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index f78fe1c1..ad5f9c57 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -447,8 +447,8 @@ impl HelmChart for CoreDNSConfigChart { format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(), ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; kubectl_exec_with_output( vec![ @@ -461,8 +461,8 @@ impl HelmChart for CoreDNSConfigChart { "meta.helm.sh/release-namespace=kube-system", ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; kubectl_exec_with_output( vec![ @@ -475,8 +475,8 @@ impl HelmChart for CoreDNSConfigChart { "app.kubernetes.io/managed-by=Helm", ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; Ok(()) }; diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 05cea017..041426a9 100644 --- a/src/cmd/command.rs +++ 
b/src/cmd/command.rs @@ -75,16 +75,16 @@ impl QoveryCommand { pub fn exec(&mut self) -> Result<(), CommandError> { self.exec_with_abort( Duration::max_value(), - |line| info!("{}", line), - |line| warn!("{}", line), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), || false, ) } pub fn exec_with_output( &mut self, - stdout_output: STDOUT, - stderr_output: STDERR, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, ) -> Result<(), CommandError> where STDOUT: FnMut(String), @@ -96,8 +96,8 @@ impl QoveryCommand { pub fn exec_with_timeout( &mut self, timeout: Duration, - stdout_output: STDOUT, - stderr_output: STDERR, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, ) -> Result<(), CommandError> where STDOUT: FnMut(String), @@ -109,8 +109,8 @@ impl QoveryCommand { pub fn exec_with_abort( &mut self, timeout: Duration, - mut stdout_output: STDOUT, - mut stderr_output: STDERR, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, should_be_killed: F, ) -> Result<(), CommandError> where @@ -222,10 +222,9 @@ impl QoveryCommand { pub fn run_version_command_for(binary_name: &str) -> String { let mut output_from_cmd = String::new(); let mut cmd = QoveryCommand::new(binary_name, &vec!["--version"], Default::default()); - let _ = cmd.exec_with_output( - |r_out| output_from_cmd.push_str(&r_out), - |r_err| error!("Error executing {}: {}", binary_name, r_err), - ); + let _ = cmd.exec_with_output(&mut |r_out| output_from_cmd.push_str(&r_out), &mut |r_err| { + error!("Error executing {}: {}", binary_name, r_err) + }); output_from_cmd } @@ -283,17 +282,17 @@ mod tests { #[test] fn test_command_with_timeout() { let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); + let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sh", &vec!["-c", "cat 
/dev/urandom | grep -a --null-data ."], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); + let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sleep", &vec!["1"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); + let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); assert_eq!(ret.is_ok(), true); } @@ -315,7 +314,7 @@ mod tests { let cmd_killer = move || should_kill2.load(Ordering::Acquire); barrier.wait(); - let ret = cmd.exec_with_abort(Duration::max_value(), |_| {}, |_| {}, cmd_killer); + let ret = cmd.exec_with_abort(Duration::max_value(), &mut |_| {}, &mut |_| {}, cmd_killer); assert!(matches!(ret, Err(CommandError::Killed(_)))); } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index f1d8ebf1..41379e17 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -88,8 +88,8 @@ impl Docker { &docker.get_all_envs(&vec![]), Some(Duration::max_value()), &|| false, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ); if let Err(_) = buildx_cmd_exist { return Err(DockerError::InvalidConfig(format!( @@ -114,8 +114,8 @@ impl Docker { &docker.get_all_envs(&vec![]), Some(Duration::max_value()), &|| false, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ); Ok(docker) @@ -151,8 +151,8 @@ impl Docker { &self.get_all_envs(&vec![]), None, &|| false, - |line| info!("{}", line), - |line| warn!("{}", line), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), )?; Ok(()) @@ -166,8 +166,8 @@ impl Docker { &self.get_all_envs(&vec![]), None, &|| false, - |line| info!("{}", line), - |line| warn!("{}", line), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), ); Ok(matches!(ret, Ok(_))) @@ -182,8 +182,8 @@ impl Docker { &self.get_all_envs(&vec![]), None, &|| false, - |line| info!("{}", line), - |line| warn!("{}", line), + &mut |line| 
info!("{}", line), + &mut |line| warn!("{}", line), ); match ret { @@ -196,8 +196,8 @@ impl Docker { pub fn pull( &self, image: &ContainerImage, - stdout_output: Stdout, - stderr_output: Stderr, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, timeout: Duration, should_abort: &dyn Fn() -> bool, ) -> Result<(), DockerError> @@ -225,8 +225,8 @@ impl Docker { build_args: &[(&str, &str)], cache: &ContainerImage, push_after_build: bool, - stdout_output: Stdout, - stderr_output: Stderr, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, timeout: Duration, should_abort: &dyn Fn() -> bool, ) -> Result<(), DockerError> @@ -296,8 +296,8 @@ impl Docker { build_args: &[(&str, &str)], cache: &ContainerImage, push_after_build: bool, - stdout_output: Stdout, - stderr_output: Stderr, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, timeout: Duration, should_abort: &dyn Fn() -> bool, ) -> Result<(), DockerError> @@ -308,7 +308,7 @@ impl Docker { info!("Docker build {:?}", image_to_build.image_name()); // Best effort to pull the cache, if it does not exist that's ok too - let _ = self.pull(cache, |_| {}, |_| {}, timeout, should_abort); + let _ = self.pull(cache, stdout_output, stderr_output, timeout, should_abort); let mut args_string: Vec = vec![ "build".to_string(), @@ -345,7 +345,7 @@ impl Docker { )?; if push_after_build { - let _ = self.push(image_to_build, |_| {}, |_| {}, timeout, should_abort)?; + let _ = self.push(image_to_build, stdout_output, stderr_output, timeout, should_abort)?; } Ok(()) @@ -359,8 +359,8 @@ impl Docker { build_args: &[(&str, &str)], cache: &ContainerImage, push_after_build: bool, - stdout_output: Stdout, - stderr_output: Stderr, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, timeout: Duration, should_abort: &dyn Fn() -> bool, ) -> Result<(), DockerError> @@ -415,8 +415,8 @@ impl Docker { pub fn push( &self, image: &ContainerImage, - stdout_output: Stdout, - stderr_output: Stderr, + stdout_output: &mut Stdout, 
+ stderr_output: &mut Stderr, timeout: Duration, should_abort: &dyn Fn() -> bool, ) -> Result<(), DockerError> @@ -445,8 +445,8 @@ fn docker_exec( envs: &[(&str, &str)], timeout: Option, should_abort: &dyn Fn() -> bool, - stdout_output: F, - stderr_output: X, + stdout_output: &mut F, + stderr_output: &mut X, ) -> Result<(), DockerError> where F: FnMut(String), @@ -471,7 +471,7 @@ pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> Engi // start a local registry to run this test // docker run --rm -ti -p 5000:5000 --name registry registry:2 -#[cfg(feature = "test-with-docker")] +//#[cfg(feature = "test-with-docker")] #[cfg(test)] mod tests { use crate::cmd::docker::{ContainerImage, Docker, DockerError}; @@ -495,8 +495,8 @@ mod tests { }; let ret = docker.pull( &image, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -511,8 +511,8 @@ mod tests { let ret = docker.pull( &image, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -521,8 +521,8 @@ mod tests { // Should timeout let ret = docker.pull( &image, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::seconds(1), &|| false, ); @@ -552,8 +552,8 @@ mod tests { &vec![], &image_cache, false, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -568,8 +568,8 @@ mod tests { &vec![], &image_cache, false, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -601,8 +601,8 @@ mod tests { &vec![], &image_cache, false, - |msg| println!("{}", msg), - |msg| 
eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -616,8 +616,8 @@ mod tests { &vec![], &image_cache, false, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -649,8 +649,8 @@ mod tests { &vec![], &image_cache, false, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -664,8 +664,8 @@ mod tests { let ret = docker.push( &image_to_build, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); @@ -673,8 +673,8 @@ mod tests { let ret = docker.pull( &image_to_build, - |msg| println!("{}", msg), - |msg| eprintln!("{}", msg), + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), Duration::max_value(), &|| false, ); diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index f9a42854..b79b8c8b 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -133,8 +133,8 @@ impl Helm { match helm_exec_with_output( &args, &self.get_all_envs(envs), - |line| stdout.push_str(&line), - |line| stderr.push_str(&line), + &mut |line| stdout.push_str(&line), + &mut |line| stderr.push_str(&line), ) { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { @@ -173,7 +173,9 @@ impl Helm { ]; let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), |_| {}, |line| stderr.push_str(&line)) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| { + stderr.push_str(&line) + }) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -206,7 +208,9 @@ impl Helm { ]; let mut stderr = String::new(); - 
match helm_exec_with_output(&args, &self.get_all_envs(envs), |_| {}, |line| stderr.push_str(&line)) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| { + stderr.push_str(&line) + }) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -260,8 +264,8 @@ impl Helm { if let Err(cmd_error) = helm_exec_with_output( &helm_args, &self.get_all_envs(envs), - |line| output_string.push(line), - |line| error!("{}", line), + &mut |line| output_string.push(line), + &mut |line| error!("{}", line), ) { return Err(HelmError::CmdError("none".to_string(), LIST, cmd_error)); } @@ -362,10 +366,10 @@ impl Helm { let helm_ret = helm_exec_with_output( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(envs), - |line| { + &mut |line| { debug!("{}", line); }, - |line| { + &mut |line| { stderr_msg.push_str(&line); warn!("chart {}: {}", chart.name, line); }, @@ -470,10 +474,10 @@ impl Helm { let helm_ret = helm_exec_with_output( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(envs), - |line| { + &mut |line| { info!("{}", line); }, - |line| { + &mut |line| { warn!("chart {}: {}", chart.name, line); error_message.push(line); }, @@ -530,15 +534,15 @@ impl Helm { } } -fn helm_exec_with_output( +fn helm_exec_with_output( args: &[&str], envs: &[(&str, &str)], - stdout_output: F, - stderr_output: X, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, ) -> Result<(), CommandError> where - F: FnMut(String), - X: FnMut(String), + STDOUT: FnMut(String), + STDERR: FnMut(String), { // Note: Helm CLI use spf13/cobra lib for the CLI; One function is mainly used to return an error if a command failed. // Helm returns an error each time a command does not succeed as they want. 
Which leads to handling error with status code 1 @@ -609,7 +613,12 @@ mod tests { #[test] fn check_version() { let mut output = String::new(); - let _ = helm_exec_with_output(&vec!["version"], &vec![], |line| output.push_str(&line), |_line| {}); + let _ = helm_exec_with_output( + &vec!["version"], + &vec![], + &mut |line| output.push_str(&line), + &mut |_line| {}, + ); assert!(output.contains("Version:\"v3.7.2\"")); } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 9aaf9dff..08015214 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -32,8 +32,8 @@ pub enum PodCondition { pub fn kubectl_exec_with_output( args: Vec<&str>, envs: Vec<(&str, &str)>, - stdout_output: F, - stderr_output: X, + stdout_output: &mut F, + stderr_output: &mut X, ) -> Result<(), CommandError> where F: FnMut(String), @@ -82,8 +82,8 @@ where "-o=custom-columns=:.status.containerStatuses..restartCount", ], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; let output_string: String = output_vec.join(""); @@ -109,8 +109,8 @@ where let _ = kubectl_exec_with_output( cmd_args.clone(), envs.clone(), - |line| output_vec.push(line), - |line| err_output_vec.push(line), + &mut |line| output_vec.push(line), + &mut |line| err_output_vec.push(line), )?; let output_string: String = output_vec.join("\n"); @@ -368,8 +368,8 @@ where let result = kubectl_exec_with_output( vec!["get", "namespace", namespace], _envs, - |out| info!("{:?}", out), - |out| warn!("{:?}", out), + &mut |out| info!("{:?}", out), + &mut |out| warn!("{:?}", out), ); result.is_ok() @@ -398,8 +398,8 @@ where let _ = kubectl_exec_with_output( vec!["create", "namespace", namespace], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; } @@ -450,7 +450,9 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); 
_envs.extend(envs.clone()); - let _ = kubectl_exec_with_output(command_args, _envs, |line| info!("{}", line), |line| error!("{}", line))?; + let _ = kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + })?; Ok(()) } @@ -539,8 +541,8 @@ where let _ = kubectl_exec_with_output( vec!["delete", "namespace", namespace], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -561,8 +563,8 @@ where let _ = kubectl_exec_with_output( vec!["delete", "crd", crd_name], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -584,8 +586,8 @@ where let _ = kubectl_exec_with_output( vec!["-n", namespace, "delete", "secret", secret], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -608,8 +610,8 @@ where let _ = kubectl_exec_with_output( vec!["logs", "--tail", "1000", "-n", namespace, "-l", selector], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; Ok(output_vec) @@ -632,8 +634,8 @@ where let _ = kubectl_exec_with_output( vec!["describe", "pod", "-n", namespace, "-l", selector], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; Ok(output_vec.join("\n")) @@ -686,8 +688,8 @@ where kubectl_exec_with_output( args, environment_variables.clone(), - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), ) } @@ -829,7 +831,7 @@ where let args = vec!["get", "event", arg_namespace.as_str(), "--sort-by='.lastTimestamp'"]; let mut result_ok = String::new(); - match kubectl_exec_with_output(args, 
environment_variables, |line| result_ok = line, |_| {}) { + match kubectl_exec_with_output(args, environment_variables, &mut |line| result_ok = line, &mut |_| {}) { Ok(()) => Ok(result_ok), Err(err) => Err(err), } @@ -929,8 +931,8 @@ where &replicas_count.to_string(), ], _envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ) } @@ -976,16 +978,16 @@ where selector, ], _envs.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; // deleting pdb in order to be able to upgrade kubernetes version kubectl_exec_with_output( vec!["-n", namespace, "delete", "pdb", "--selector", selector], _envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; let condition = match replicas_count { @@ -1029,8 +1031,8 @@ where "--timeout=300s", ], complete_envs, - |out| info!("{:?}", out), - |out| warn!("{:?}", out), + &mut |out| info!("{:?}", out), + &mut |out| warn!("{:?}", out), ) } @@ -1175,8 +1177,8 @@ where pod_to_be_deleted.metadata.namespace.as_str(), ], complete_envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ) { Ok(_) => Ok(pod_to_be_deleted), Err(e) => Err(CommandError::new(e.message(), None)), @@ -1196,8 +1198,8 @@ where let _ = kubectl_exec_with_output( args.clone(), _envs.clone(), - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; let output_string: String = output_vec.join(""); diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index d3cab515..44f1a220 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -200,11 +200,11 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co cmd.set_current_dir(root_dir); let result = cmd.exec_with_output( - |line| { + &mut |line| { info!("{}", line); stdout.push(line); }, - |line| { + &mut |line| { error!("{}", line); stderr.push(line); }, diff --git a/test_utilities/Cargo.lock b/test_utilities/Cargo.lock index 2b2e33bf..46e83ffb 100644 --- a/test_utilities/Cargo.lock +++ b/test_utilities/Cargo.lock @@ 
-963,9 +963,9 @@ checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" [[package]] name = "git2" -version = "0.13.25" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" +checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" dependencies = [ "bitflags", "libc", @@ -1464,9 +1464,9 @@ checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "libgit2-sys" -version = "0.12.26+1.3.0" +version = "0.13.2+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" dependencies = [ "cc", "libc", From 0303c860a32ade06e3606212869041847c2b3845 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 17 Mar 2022 16:59:50 +0100 Subject: [PATCH 24/85] Fix for CI --- src/cmd/docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 41379e17..c74fb991 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -471,7 +471,7 @@ pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> Engi // start a local registry to run this test // docker run --rm -ti -p 5000:5000 --name registry registry:2 -//#[cfg(feature = "test-with-docker")] +#[cfg(feature = "test-with-docker")] #[cfg(test)] mod tests { use crate::cmd::docker::{ContainerImage, Docker, DockerError}; From f7c5ff09e8e1533512217825cfe2f4cbac29f5f1 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 16 Mar 2022 22:05:17 +0100 Subject: [PATCH 25/85] feat: add AWS t3a medium support --- src/cloud_provider/aws/kubernetes/node.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/cloud_provider/aws/kubernetes/node.rs b/src/cloud_provider/aws/kubernetes/node.rs index 8f0bdd0f..953cdbd8 
100644 --- a/src/cloud_provider/aws/kubernetes/node.rs +++ b/src/cloud_provider/aws/kubernetes/node.rs @@ -10,6 +10,7 @@ pub enum AwsInstancesType { T2Xlarge, // 4 cores 16Gb RAM T3Large, // 2 cores 8Gb RAM T3Xlarge, // 4 cores 16Gb RAM + T3aMedium, // 2 cores 4Gb RAM T3aLarge, // 2 cores 8Gb RAM T3a2xlarge, // 8 cores 32Gb RAM } @@ -21,6 +22,7 @@ impl InstanceType for AwsInstancesType { AwsInstancesType::T2Xlarge => "t2x.large", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", + AwsInstancesType::T3aMedium => "t3a.medium", AwsInstancesType::T3aLarge => "t3a.large", AwsInstancesType::T3a2xlarge => "t3a.2xlarge", } @@ -35,6 +37,7 @@ impl AwsInstancesType { AwsInstancesType::T2Xlarge => "t2x.large", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", + AwsInstancesType::T3aMedium => "t3a.medium", AwsInstancesType::T3aLarge => "t3a.large", AwsInstancesType::T3a2xlarge => "t3a.2xlarge", } @@ -48,6 +51,7 @@ impl fmt::Display for AwsInstancesType { AwsInstancesType::T2Xlarge => write!(f, "t2x.large"), AwsInstancesType::T3Large => write!(f, "t3.large"), AwsInstancesType::T3Xlarge => write!(f, "t3.xlarge"), + AwsInstancesType::T3aMedium => write!(f, "t3a.medium"), AwsInstancesType::T3aLarge => write!(f, "t3a.large"), AwsInstancesType::T3a2xlarge => write!(f, "t3a.2xlarge"), } @@ -63,6 +67,7 @@ impl FromStr for AwsInstancesType { "t2x.large" => Ok(AwsInstancesType::T2Xlarge), "t3.large" => Ok(AwsInstancesType::T3Large), "t3.xlarge" => Ok(AwsInstancesType::T3Xlarge), + "t3a.medium" => Ok(AwsInstancesType::T3aMedium), "t3a.large" => Ok(AwsInstancesType::T3aLarge), "t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge), _ => { From 32c998b1b9a716afa8c87294fed58f2f0a1d2b45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 18 Mar 2022 09:18:42 +0100 Subject: [PATCH 26/85] Allow to customize how to abort a command (#650) --- src/build_platform/local_docker.rs | 16 +-- 
src/cmd/command.rs | 156 +++++++++++++++++++---------- src/cmd/docker.rs | 99 ++++++------------ src/cmd/helm.rs | 3 +- src/cmd/kubectl.rs | 3 +- 5 files changed, 144 insertions(+), 133 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 47abd851..4277ea84 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -1,15 +1,15 @@ use std::io::{Error, ErrorKind}; use std::path::Path; +use std::time::Duration; use std::{env, fs}; -use chrono::Duration; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; -use crate::cmd::command::QoveryCommand; +use crate::cmd::command::{CommandKiller, QoveryCommand}; use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; @@ -20,7 +20,7 @@ use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; -const BUILD_DURATION_TIMEOUT_MIN: i64 = 30; +const BUILD_DURATION_TIMEOUT_SEC: u64 = 30 * 60; /// https://buildpacks.io/ const BUILDPACKS_BUILDERS: [&str; 1] = [ @@ -164,8 +164,7 @@ impl LocalDocker { self.context.execution_id(), )); }, - Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), - is_task_canceled, + &CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled), ); match exit_status { @@ -276,8 +275,8 @@ impl LocalDocker { // buildpacks build let mut cmd = QoveryCommand::new("pack", &buildpacks_args, &self.get_docker_host_envs()); + let cmd_killer = CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled); exit_status = cmd.exec_with_abort( - Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), &mut |line| { 
self.logger.log( LogLevel::Info, @@ -308,7 +307,7 @@ impl LocalDocker { self.context.execution_id(), )); }, - is_task_canceled, + &cmd_killer, ); if exit_status.is_ok() { @@ -682,7 +681,8 @@ fn docker_prune_images(envs: Vec<(&str, &str)>) -> Result<(), CommandError> { let mut errored_commands = vec![]; for prune in all_prunes_commands { let mut cmd = QoveryCommand::new("docker", &prune, &envs); - if let Err(e) = cmd.exec_with_timeout(Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), &mut |_| {}, &mut |_| {}) { + let cmd_killer = CommandKiller::from_timeout(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC)); + if let Err(e) = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &cmd_killer) { errored_commands.push(format!("{} {:?}", prune[0], e)); } } diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 041426a9..1366eb65 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -7,9 +7,8 @@ use std::process::{Child, Command, ExitStatus, Stdio}; use crate::cmd::command::CommandError::{ExecutionError, ExitStatusError, Killed, TimeoutError}; use crate::cmd::command::CommandOutputType::{STDERR, STDOUT}; -use chrono::Duration; use itertools::Itertools; -use std::time::Instant; +use std::time::{Duration, Instant}; use timeout_readwrite::TimeoutReader; enum CommandOutputType { @@ -45,6 +44,63 @@ impl CommandError { } } +#[derive(Debug, Clone)] +pub enum AbortReason { + Timeout(Duration), + Canceled(String), +} +pub struct CommandKiller<'a> { + should_abort: Box Option + 'a>, +} + +impl<'a> CommandKiller<'a> { + pub fn never() -> CommandKiller<'a> { + CommandKiller { + should_abort: Box::new(|| None), + } + } + + pub fn from_timeout(timeout: Duration) -> CommandKiller<'a> { + let now = Instant::now(); + CommandKiller { + should_abort: Box::new(move || { + if now.elapsed() >= timeout { + return Some(AbortReason::Timeout(timeout)); + } + + None + }), + } + } + + pub fn from_cancelable(is_canceled: &'a dyn Fn() -> bool) -> CommandKiller<'a> { + CommandKiller { + should_abort: 
Box::new(move || { + if is_canceled() { + return Some(AbortReason::Canceled("Task canceled".to_string())); + } + None + }), + } + } + + pub fn from(timeout: Duration, is_canceled: &'a dyn Fn() -> bool) -> CommandKiller<'a> { + let has_timeout = Self::from_timeout(timeout); + let is_canceled = Self::from_cancelable(is_canceled); + CommandKiller { + should_abort: Box::new(move || { + (is_canceled.should_abort)()?; + (has_timeout.should_abort)()?; + None + }), + } + } + + pub fn should_abort(&self) -> Option { + (self.should_abort)() + } +} + pub struct QoveryCommand { command: Command, } @@ -74,10 +130,9 @@ impl QoveryCommand { pub fn exec(&mut self) -> Result<(), CommandError> { self.exec_with_abort( - Duration::max_value(), &mut |line| info!("{}", line), &mut |line| warn!("{}", line), - || false, + &CommandKiller::never(), ) } @@ -90,36 +145,19 @@ impl QoveryCommand { STDOUT: FnMut(String), STDERR: FnMut(String), { - self.exec_with_abort(Duration::max_value(), stdout_output, stderr_output, || false) + self.exec_with_abort(stdout_output, stderr_output, &CommandKiller::never()) } - pub fn exec_with_timeout( + pub fn exec_with_abort( &mut self, - timeout: Duration, stdout_output: &mut STDOUT, stderr_output: &mut STDERR, + abort_notifier: &CommandKiller, ) -> Result<(), CommandError> where STDOUT: FnMut(String), STDERR: FnMut(String), { - self.exec_with_abort(timeout, stdout_output, stderr_output, || false) - } - - pub fn exec_with_abort( - &mut self, - timeout: Duration, - stdout_output: &mut STDOUT, - stderr_output: &mut STDERR, - should_be_killed: F, - ) -> Result<(), CommandError> - where - STDOUT: FnMut(String), - STDERR: FnMut(String), - F: Fn() -> bool, - { - assert!(timeout.num_seconds() > 0, "Timeout cannot be a 0 or negative duration"); - info!("command: {:?}", self.command); let mut cmd_handle = self .command @@ -128,10 +166,8 @@ impl QoveryCommand { .spawn() .map_err(ExecutionError)?; - let process_start_time = Instant::now(); - // Read stdout/stderr until 
timeout is reached - let reader_timeout = std::time::Duration::from_secs(10.min(timeout.num_seconds() as u64)); + let reader_timeout = std::time::Duration::from_secs(5); let stdout = cmd_handle.stdout.take().ok_or(ExecutionError(Error::new( ErrorKind::BrokenPipe, "Cannot get stdout for command", @@ -160,11 +196,7 @@ impl QoveryCommand { STDERR(Err(err)) => error!("Error on stderr of cmd {:?}: {:?}", self.command, err), } - if should_be_killed() { - break; - } - - if (process_start_time.elapsed().as_secs() as i64) >= timeout.num_seconds() { + if abort_notifier.should_abort().is_some() { break; } } @@ -180,23 +212,24 @@ impl QoveryCommand { } Ok(None) => { // Does the process should be killed ? - if should_be_killed() { - let msg = format!("Killing process {:?}", self.command); - warn!("{}", msg); - Self::kill(&mut cmd_handle); - return Err(Killed(msg)); - } - - // Does the timeout has been reached ? - if (process_start_time.elapsed().as_secs() as i64) >= timeout.num_seconds() { - let msg = format!( - "Killing process {:?} due to timeout {}m reached", - self.command, - timeout.num_minutes() - ); - warn!("{}", msg); - Self::kill(&mut cmd_handle); - return Err(TimeoutError(msg)); + match abort_notifier.should_abort() { + None => {} + Some(AbortReason::Timeout(timeout)) => { + let msg = format!( + "Killing process {:?} due to timeout {}s reached", + self.command, + timeout.as_secs() + ); + warn!("{}", msg); + Self::kill(&mut cmd_handle); + return Err(TimeoutError(msg)); + } + Some(AbortReason::Canceled(_)) => { + let msg = format!("Killing process {:?}", self.command); + warn!("{}", msg); + Self::kill(&mut cmd_handle); + return Err(Killed(msg)); + } } } Err(err) => return Err(ExecutionError(err)), @@ -252,10 +285,10 @@ where #[cfg(test)] mod tests { - use crate::cmd::command::{does_binary_exist, run_version_command_for, CommandError, QoveryCommand}; - use chrono::Duration; + use crate::cmd::command::{does_binary_exist, run_version_command_for, CommandError, 
CommandKiller, QoveryCommand}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Barrier}; + use std::time::Duration; use std::{thread, time}; #[test] @@ -282,17 +315,29 @@ mod tests { #[test] fn test_command_with_timeout() { let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); + let ret = cmd.exec_with_abort( + &mut |_| {}, + &mut |_| {}, + &CommandKiller::from_timeout(Duration::from_secs(2)), + ); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sh", &vec!["-c", "cat /dev/urandom | grep -a --null-data ."], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); + let ret = cmd.exec_with_abort( + &mut |_| {}, + &mut |_| {}, + &CommandKiller::from_timeout(Duration::from_secs(2)), + ); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sleep", &vec!["1"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), &mut |_| {}, &mut |_| {}); + let ret = cmd.exec_with_abort( + &mut |_| {}, + &mut |_| {}, + &CommandKiller::from_timeout(Duration::from_secs(2)), + ); assert_eq!(ret.is_ok(), true); } @@ -313,8 +358,9 @@ mod tests { }); let cmd_killer = move || should_kill2.load(Ordering::Acquire); + let cmd_killer = CommandKiller::from_cancelable(&cmd_killer); barrier.wait(); - let ret = cmd.exec_with_abort(Duration::max_value(), &mut |_| {}, &mut |_| {}, cmd_killer); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &cmd_killer); assert!(matches!(ret, Err(CommandError::Killed(_)))); } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index c74fb991..d5dd98ef 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -1,7 +1,6 @@ -use crate::cmd::command::{CommandError, QoveryCommand}; +use crate::cmd::command::{CommandError, CommandKiller, QoveryCommand}; use crate::errors::EngineError; use crate::events::EventDetails; 
-use chrono::Duration; use std::path::Path; use std::process::ExitStatus; use url::Url; @@ -86,10 +85,9 @@ impl Docker { let buildx_cmd_exist = docker_exec( &args, &docker.get_all_envs(&vec![]), - Some(Duration::max_value()), - &|| false, &mut |_| {}, &mut |_| {}, + &CommandKiller::never(), ); if let Err(_) = buildx_cmd_exist { return Err(DockerError::InvalidConfig(format!( @@ -112,10 +110,9 @@ impl Docker { let _ = docker_exec( &args, &docker.get_all_envs(&vec![]), - Some(Duration::max_value()), - &|| false, &mut |_| {}, &mut |_| {}, + &CommandKiller::never(), ); Ok(docker) @@ -149,10 +146,9 @@ impl Docker { docker_exec( &args, &self.get_all_envs(&vec![]), - None, - &|| false, &mut |line| info!("{}", line), &mut |line| warn!("{}", line), + &CommandKiller::never(), )?; Ok(()) @@ -164,10 +160,9 @@ impl Docker { let ret = docker_exec( &vec!["image", "inspect", &image.image_name()], &self.get_all_envs(&vec![]), - None, - &|| false, &mut |line| info!("{}", line), &mut |line| warn!("{}", line), + &CommandKiller::never(), ); Ok(matches!(ret, Ok(_))) @@ -180,10 +175,9 @@ impl Docker { let ret = docker_exec( &vec!["manifest", "inspect", &image.image_name()], &self.get_all_envs(&vec![]), - None, - &|| false, &mut |line| info!("{}", line), &mut |line| warn!("{}", line), + &CommandKiller::never(), ); match ret { @@ -198,22 +192,20 @@ impl Docker { image: &ContainerImage, stdout_output: &mut Stdout, stderr_output: &mut Stderr, - timeout: Duration, - should_abort: &dyn Fn() -> bool, + should_abort: &CommandKiller, ) -> Result<(), DockerError> where Stdout: FnMut(String), Stderr: FnMut(String), { - info!("Docker pull {:?}, timeout: {:?}", image, timeout); + info!("Docker pull {:?}", image); docker_exec( &vec!["pull", &image.image_name()], &self.get_all_envs(&vec![]), - Some(timeout), - should_abort, stdout_output, stderr_output, + should_abort, ) } @@ -227,8 +219,7 @@ impl Docker { push_after_build: bool, stdout_output: &mut Stdout, stderr_output: &mut Stderr, - timeout: 
Duration, - should_abort: &dyn Fn() -> bool, + should_abort: &CommandKiller, ) -> Result<(), DockerError> where Stdout: FnMut(String), @@ -239,11 +230,6 @@ impl Docker { return Ok(()); } - // if it is already aborted, nothing to do - if (should_abort)() { - return Err(DockerError::Aborted("build".to_string())); - } - // Do some checks if !dockerfile.is_file() { return Err(DockerError::InvalidConfig(format!( @@ -269,7 +255,6 @@ impl Docker { push_after_build, stdout_output, stderr_output, - timeout, should_abort, ) } else { @@ -282,7 +267,6 @@ impl Docker { push_after_build, stdout_output, stderr_output, - timeout, should_abort, ) } @@ -298,8 +282,7 @@ impl Docker { push_after_build: bool, stdout_output: &mut Stdout, stderr_output: &mut Stderr, - timeout: Duration, - should_abort: &dyn Fn() -> bool, + should_abort: &CommandKiller, ) -> Result<(), DockerError> where Stdout: FnMut(String), @@ -308,7 +291,7 @@ impl Docker { info!("Docker build {:?}", image_to_build.image_name()); // Best effort to pull the cache, if it does not exist that's ok too - let _ = self.pull(cache, stdout_output, stderr_output, timeout, should_abort); + let _ = self.pull(cache, stdout_output, stderr_output, should_abort); let mut args_string: Vec = vec![ "build".to_string(), @@ -338,14 +321,13 @@ impl Docker { let _ = docker_exec( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(&vec![]), - Some(timeout), - should_abort, stdout_output, stderr_output, + should_abort, )?; if push_after_build { - let _ = self.push(image_to_build, stdout_output, stderr_output, timeout, should_abort)?; + let _ = self.push(image_to_build, stdout_output, stderr_output, should_abort)?; } Ok(()) @@ -361,8 +343,7 @@ impl Docker { push_after_build: bool, stdout_output: &mut Stdout, stderr_output: &mut Stderr, - timeout: Duration, - should_abort: &dyn Fn() -> bool, + should_abort: &CommandKiller, ) -> Result<(), DockerError> where Stdout: FnMut(String), @@ -405,10 +386,9 @@ impl Docker { 
docker_exec( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(&vec![]), - Some(timeout), - should_abort, stdout_output, stderr_output, + should_abort, ) } @@ -417,14 +397,13 @@ impl Docker { image: &ContainerImage, stdout_output: &mut Stdout, stderr_output: &mut Stderr, - timeout: Duration, - should_abort: &dyn Fn() -> bool, + should_abort: &CommandKiller, ) -> Result<(), DockerError> where Stdout: FnMut(String), Stderr: FnMut(String), { - info!("Docker push {:?}, timeout: {:?}", image, timeout); + info!("Docker push {:?}", image); let image_names = image.image_names(); let mut args = vec!["push"]; args.extend(image_names.iter().map(|x| x.as_str())); @@ -432,10 +411,9 @@ impl Docker { docker_exec( &args, &self.get_all_envs(&vec![]), - Some(timeout), - should_abort, stdout_output, stderr_output, + should_abort, ) } } @@ -443,18 +421,16 @@ impl Docker { fn docker_exec( args: &[&str], envs: &[(&str, &str)], - timeout: Option, - should_abort: &dyn Fn() -> bool, stdout_output: &mut F, stderr_output: &mut X, + cmd_killer: &CommandKiller, ) -> Result<(), DockerError> where F: FnMut(String), X: FnMut(String), { - let timeout = timeout.unwrap_or_else(|| Duration::max_value()); let mut cmd = QoveryCommand::new("docker", args, envs); - let ret = cmd.exec_with_abort(timeout, stdout_output, stderr_output, should_abort); + let ret = cmd.exec_with_abort(stdout_output, stderr_output, &cmd_killer); match ret { Ok(_) => Ok(()), @@ -474,9 +450,10 @@ pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> Engi #[cfg(feature = "test-with-docker")] #[cfg(test)] mod tests { + use crate::cmd::command::CommandKiller; use crate::cmd::docker::{ContainerImage, Docker, DockerError}; - use chrono::Duration; use std::path::Path; + use std::time::Duration; use url::Url; fn private_registry_url() -> Url { @@ -497,8 +474,7 @@ mod tests { &image, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + 
&CommandKiller::never(), ); assert!(matches!(ret, Err(_))); @@ -513,8 +489,7 @@ mod tests { &image, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -523,8 +498,7 @@ mod tests { &image, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::seconds(1), - &|| false, + &CommandKiller::from_timeout(Duration::from_secs(1)), ); assert!(matches!(ret, Err(DockerError::Timeout(_)))); } @@ -554,8 +528,7 @@ mod tests { false, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -570,8 +543,7 @@ mod tests { false, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Err(_))); @@ -603,8 +575,7 @@ mod tests { false, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -618,8 +589,7 @@ mod tests { false, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -651,8 +621,7 @@ mod tests { false, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -666,8 +635,7 @@ mod tests { &image_to_build, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); @@ -675,8 +643,7 @@ mod tests { &image_to_build, &mut |msg| println!("{}", msg), &mut |msg| eprintln!("{}", msg), - Duration::max_value(), - &|| false, + &CommandKiller::never(), ); assert!(matches!(ret, Ok(_))); } diff --git a/src/cmd/helm.rs 
b/src/cmd/helm.rs index b79b8c8b..5e056975 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -10,7 +10,6 @@ use crate::cmd::helm::HelmError::{CannotRollback, CmdError, InvalidKubeConfig, R use crate::cmd::structs::{HelmChart, HelmListItem}; use crate::errors::{CommandError, EngineError}; use crate::events::EventDetails; -use chrono::Duration; use semver::Version; use serde_derive::Deserialize; use std::fs::File; @@ -548,7 +547,7 @@ where // Helm returns an error each time a command does not succeed as they want. Which leads to handling error with status code 1 // It means that the command successfully ran, but it didn't terminate as expected let mut cmd = QoveryCommand::new("helm", args, envs); - match cmd.exec_with_timeout(Duration::max_value(), stdout_output, stderr_output) { + match cmd.exec_with_output(stdout_output, stderr_output) { Err(err) => Err(CommandError::new(format!("{:?}", err), None)), _ => Ok(()), } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 08015214..ae9f0303 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -1,6 +1,5 @@ use std::path::Path; -use chrono::Duration; use retry::delay::Fibonacci; use retry::OperationResult; use serde::de::DeserializeOwned; @@ -41,7 +40,7 @@ where { let mut cmd = QoveryCommand::new("kubectl", &args, &envs); - if let Err(err) = cmd.exec_with_timeout(Duration::max_value(), stdout_output, stderr_output) { + if let Err(err) = cmd.exec_with_output(stdout_output, stderr_output) { let args_string = args.join(" "); let msg = format!("Error on command: kubectl {}. 
{:?}", args_string, &err); error!("{}", &msg); From ce3d4a0cfa4b7679905f2b07d14a7b26e9c88d0b Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 09:19:37 +0100 Subject: [PATCH 27/85] Fix CI --- tests/aws/aws_environment.rs | 2 +- tests/digitalocean/do_environment.rs | 2 +- tests/scaleway/scw_environment.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 540d482e..59f489e3 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -951,7 +951,7 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { assert!(matches!(ret, TransactionResult::Ok)); // let time for nginx to reload the config - thread::sleep(Duration::from_secs(5)); + thread::sleep(Duration::from_secs(10)); // checking if cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 51ede847..09a45edf 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -836,7 +836,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { assert!(matches!(result, TransactionResult::Ok)); // let time for nginx to reload the config - thread::sleep(Duration::from_secs(5)); + thread::sleep(Duration::from_secs(10)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 76ccd061..c4df902f 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -956,7 +956,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { assert!(matches!(result, TransactionResult::Ok)); // let time for nginx to reload the config - thread::sleep(Duration::from_secs(5)); + thread::sleep(Duration::from_secs(10)); // checking 
cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); From 97f89112190c65e708e11902da2f6354b0234669 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 11:09:44 +0100 Subject: [PATCH 28/85] Fix DO registry login --- src/container_registry/docr.rs | 49 +++++++--- src/container_registry/ecr.rs | 51 ++++++----- src/container_registry/mod.rs | 2 +- .../scaleway_container_registry.rs | 91 +++++++++++-------- test_utilities/src/aws.rs | 1 + test_utilities/src/digitalocean.rs | 1 + test_utilities/src/scaleway.rs | 4 +- tests/scaleway/scw_container_registry.rs | 12 ++- 8 files changed, 131 insertions(+), 80 deletions(-) diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index ff0c14ab..1ac3198b 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -6,6 +6,7 @@ use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; +use crate::cmd::docker::{to_engine_error, Docker}; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, EngineError, Kind}; use crate::errors::CommandError; use crate::events::{EngineEvent, EventDetails, ToTransmitter, Transmitter}; @@ -26,22 +27,49 @@ pub struct DOCR { pub name: String, pub api_key: String, pub id: String, - pub registry_info: Option, + pub registry_info: ContainerRegistryInfo, pub listeners: Listeners, pub logger: Box, } impl DOCR { - pub fn new(context: Context, id: &str, name: &str, api_key: &str, logger: Box) -> Self { - DOCR { + pub fn new( + context: Context, + id: &str, + name: &str, + api_key: &str, + logger: Box, + ) -> Result { + let registry_name = name.to_string(); + let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); + let _ = registry.set_username(&api_key); + let _ = registry.set_password(Some(&api_key)); + let registry_info = ContainerRegistryInfo { + endpoint: registry, + registry_name: name.to_string(), + 
registry_docker_json_config: None, + get_image_name: Box::new(move |img_name| format!("{}/{}", registry_name, img_name)), + }; + + let cr = DOCR { context, - name: name.into(), + name: name.to_string(), api_key: api_key.into(), id: id.into(), - registry_info: None, + registry_info, listeners: vec![], logger, + }; + + let event_details = cr.get_event_details(); + let docker = + Docker::new(cr.context.docker_tcp_socket().clone()).map_err(|err| to_engine_error(&event_details, err))?; + if docker.login(&cr.registry_info.endpoint).is_err() { + return Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, + )); } + Ok(cr) } fn create_registry(&self, registry_name: &str) -> Result<(), EngineError> { @@ -196,15 +224,8 @@ impl ContainerRegistry for DOCR { Ok(()) } - fn login(&self) -> Result { - let _ = self.exec_docr_login()?; - let registry_name = self.name.clone(); - Ok(ContainerRegistryInfo { - endpoint: Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(), - registry_name: self.name.to_string(), - registry_docker_json_config: None, - get_image_name: Box::new(move |img_name| format!("{}/{}", registry_name, img_name)), - }) + fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { + Ok(&self.registry_info) } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 28b4fd4c..4944729c 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -30,6 +30,7 @@ pub struct ECR { access_key_id: String, secret_access_key: String, region: Region, + registry_info: Option, listeners: Listeners, logger: Box, } @@ -43,17 +44,39 @@ impl ECR { secret_access_key: &str, region: &str, logger: Box, - ) -> Self { - ECR { + ) -> Result { + let mut cr = ECR { context, id: id.to_string(), name: name.to_string(), access_key_id: access_key_id.to_string(), secret_access_key: secret_access_key.to_string(), region: Region::from_str(region).unwrap(), + 
registry_info: None, listeners: vec![], logger, - } + }; + + let credentials = cr.get_credentials()?; + let docker = Docker::new(cr.context.docker_tcp_socket().clone()) + .map_err(|err| to_engine_error(&cr.get_event_details(), err))?; + let mut registry_url = Url::parse(credentials.endpoint_url.as_str()).unwrap(); + let _ = registry_url.set_username(&credentials.access_token); + let _ = registry_url.set_password(Some(&credentials.password)); + + let _ = docker + .login(®istry_url) + .map_err(|err| to_engine_error(&cr.get_event_details(), err))?; + + let registry_info = ContainerRegistryInfo { + endpoint: registry_url, + registry_name: cr.name.to_string(), + registry_docker_json_config: None, + get_image_name: Box::new(|img_name| img_name.to_string()), + }; + + cr.registry_info = Some(registry_info); + Ok(cr) } pub fn credentials(&self) -> StaticProvider { @@ -363,25 +386,9 @@ impl ContainerRegistry for ECR { } } - fn login(&self) -> Result { - let event_details = self.get_event_details(); - let credentials = self.get_credentials()?; - let docker = Docker::new(self.context.docker_tcp_socket().clone()) - .map_err(|err| to_engine_error(&event_details, err))?; - let mut registry_url = Url::parse(credentials.endpoint_url.as_str()).unwrap(); - let _ = registry_url.set_username(&credentials.access_token); - let _ = registry_url.set_password(Some(&credentials.password)); - - let _ = docker - .login(®istry_url) - .map_err(|err| to_engine_error(&event_details, err))?; - - Ok(ContainerRegistryInfo { - endpoint: registry_url, - registry_name: self.name.to_string(), - registry_docker_json_config: None, - get_image_name: Box::new(|img_name| img_name.to_string()), - }) + fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { + // At this point the registry info should be initialize, so unwrap is safe + Ok(&self.registry_info.as_ref().unwrap()) } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/container_registry/mod.rs 
b/src/container_registry/mod.rs index ddf8e56d..d648d29e 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -25,7 +25,7 @@ pub trait ContainerRegistry: Listen + ToTransmitter { // mainly getting creds and calling docker login behind the hood // It is poart of the ContainerRegistry only because DigitalOcean require to call doctl // and that we can't get credentials directly - fn login(&self) -> Result; + fn login(&self) -> Result<&ContainerRegistryInfo, EngineError>; // Some provider require specific action in order to allow container registry // For now it is only digital ocean, that require 2 steps to have registries diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index fcdb03ac..5bb0af0c 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -6,12 +6,12 @@ use std::borrow::Borrow; use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; use crate::cmd::docker; -use crate::cmd::docker::Docker; +use crate::cmd::docker::{to_engine_error, Docker}; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; +use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, Stage, ToTransmitter, Transmitter}; use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; use crate::runtime::block_on; use url::Url; @@ -20,9 +20,9 @@ pub struct ScalewayCR { id: String, name: String, default_project_id: String, - login: String, secret_token: String, zone: ScwZone, + registry_info: ContainerRegistryInfo, docker: Docker, listeners: Listeners, logger: Box, @@ -37,21 +37,58 @@ impl 
ScalewayCR { default_project_id: &str, zone: ScwZone, logger: Box, - ) -> ScalewayCR { - let docker = Docker::new(context.docker_tcp_socket().clone()).unwrap(); // FIXME: remove unwrap + ) -> Result { + let event_details = EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + Stage::General(GeneralStep::ValidateSystemRequirements), + Transmitter::ContainerRegistry(id.to_string(), name.to_string()), + ); - ScalewayCR { + // Be sure we are logged on the registry + let login = "nologin".to_string(); + let secret_token = secret_token.to_string(); + + let mut registry = Url::parse(&format!("https://rg.{}.scw.cloud", zone.region())).unwrap(); + let _ = registry.set_username(&login); + let _ = registry.set_password(Some(&secret_token)); + + let docker = + Docker::new(context.docker_tcp_socket().clone()).map_err(|err| to_engine_error(&event_details, err))?; + if docker.login(®istry).is_err() { + return Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, + )); + } + + let registry_info = ContainerRegistryInfo { + endpoint: registry, + registry_name: name.to_string(), + registry_docker_json_config: Some(Self::get_docker_json_config_raw( + &login, + &secret_token, + zone.region().as_str(), + )), + get_image_name: Box::new(move |img_name| format!("{}/{}", img_name, img_name)), + }; + + let cr = ScalewayCR { context, id: id.to_string(), name: name.to_string(), default_project_id: default_project_id.to_string(), - login: "nologin".to_string(), - secret_token: secret_token.to_string(), + secret_token, zone, + registry_info, docker, listeners: Vec::new(), logger, - } + }; + + Ok(cr) } fn get_configuration(&self) -> scaleway_api_rs::apis::configuration::Configuration { @@ -291,12 +328,12 @@ impl ScalewayCR { self.create_registry_namespace(namespace_name) } - fn 
get_docker_json_config_raw(&self) -> String { + fn get_docker_json_config_raw(login: &str, secret_token: &str, region: &str) -> String { base64::encode( format!( r#"{{"auths":{{"rg.{}.scw.cloud":{{"auth":"{}"}}}}}}"#, - self.zone.region().as_str(), - base64::encode(format!("nologin:{}", self.secret_token).as_bytes()) + region, + base64::encode(format!("{}:{}", login, secret_token).as_bytes()) ) .as_bytes(), ) @@ -330,24 +367,8 @@ impl ContainerRegistry for ScalewayCR { Ok(()) } - fn login(&self) -> Result { - let event_details = self.get_event_details(); - let mut registry = Url::parse(&format!("https://rg.{}.scw.cloud", self.zone.region())).unwrap(); - let _ = registry.set_username(&self.login); - let _ = registry.set_password(Some(&self.secret_token)); - - if self.docker.login(®istry).is_err() { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - } - - Ok(ContainerRegistryInfo { - endpoint: registry, - registry_name: self.name.to_string(), - registry_docker_json_config: Some(self.get_docker_json_config_raw()), - get_image_name: Box::new(move |img_name| format!("{}/{}", img_name, img_name)), - }) + fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { + Ok(&self.registry_info) } fn create_registry(&self) -> Result<(), EngineError> { @@ -361,14 +382,8 @@ impl ContainerRegistry for ScalewayCR { } fn does_image_exists(&self, image: &Image) -> bool { - let info = if let Ok(url) = self.login() { - url - } else { - return false; - }; - let image = docker::ContainerImage { - registry: info.endpoint, + registry: self.registry_info.endpoint.clone(), name: image.name().clone(), tags: vec![image.tag.clone()], }; diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 8276d917..d6540cdb 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -51,6 +51,7 @@ pub fn container_registry_ecr(context: &Context) -> ECR { secrets.AWS_DEFAULT_REGION.unwrap().as_str(), logger(), ) + .unwrap() } 
pub fn aws_default_engine_config(context: &Context, logger: Box) -> EngineConfig { diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 656db11e..03a8920d 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -40,6 +40,7 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), logger(), ) + .unwrap() } pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index c6149d54..801d3627 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -61,6 +61,7 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { SCW_TEST_ZONE, logger(), ) + .unwrap() } pub fn scw_default_engine_config(context: &Context, logger: Box) -> EngineConfig { @@ -237,7 +238,8 @@ pub fn clean_environments( project_id.as_str(), zone, logger(), - ); + ) + .unwrap(); // delete images created in registry let registry_url = container_registry_client.login()?; diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index ce1f918a..dd2fee90 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -46,7 +46,8 @@ fn test_get_registry_namespace() { scw_default_project_id.as_str(), region, logger(), - ); + ) + .unwrap(); let image = registry_name.to_string(); container_registry @@ -95,7 +96,8 @@ fn test_create_registry_namespace() { scw_default_project_id.as_str(), region, logger(), - ); + ) + .unwrap(); let image = registry_name.to_string(); @@ -138,7 +140,8 @@ fn test_delete_registry_namespace() { scw_default_project_id.as_str(), region, logger(), - ); + ) + .unwrap(); let image = registry_name.to_string(); container_registry @@ -175,7 +178,8 @@ fn test_get_or_create_registry_namespace() { scw_default_project_id.as_str(), region, logger(), - ); + 
) + .unwrap(); let image = registry_name.to_string(); container_registry From 7eacdbc80f2ce9527d2353e7f9d856d781d16a3b Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 11:28:35 +0100 Subject: [PATCH 29/85] Simplify --- src/container_registry/docr.rs | 4 ++-- src/container_registry/ecr.rs | 4 ++-- src/container_registry/mod.rs | 7 ++----- src/container_registry/scaleway_container_registry.rs | 4 ++-- src/transaction.rs | 10 +++------- test_utilities/src/scaleway.rs | 2 +- 6 files changed, 12 insertions(+), 19 deletions(-) diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 1ac3198b..4b70092a 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -224,8 +224,8 @@ impl ContainerRegistry for DOCR { Ok(()) } - fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { - Ok(&self.registry_info) + fn registry_info(&self) -> &ContainerRegistryInfo { + &self.registry_info } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 4944729c..f9906e4b 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -386,9 +386,9 @@ impl ContainerRegistry for ECR { } } - fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { + fn registry_info(&self) -> &ContainerRegistryInfo { // At this point the registry info should be initialize, so unwrap is safe - Ok(&self.registry_info.as_ref().unwrap()) + self.registry_info.as_ref().unwrap() } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index d648d29e..dafec506 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -21,11 +21,8 @@ pub trait ContainerRegistry: Listen + ToTransmitter { } fn is_valid(&self) -> Result<(), EngineError>; - // Login into the registry and setup everything for it - // mainly getting creds and calling docker login 
behind the hood - // It is poart of the ContainerRegistry only because DigitalOcean require to call doctl - // and that we can't get credentials directly - fn login(&self) -> Result<&ContainerRegistryInfo, EngineError>; + // Get info for this registry, url endpoint with login/password, image name convention, ... + fn registry_info(&self) -> &ContainerRegistryInfo; // Some provider require specific action in order to allow container registry // For now it is only digital ocean, that require 2 steps to have registries diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 5bb0af0c..ad124b4d 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -367,8 +367,8 @@ impl ContainerRegistry for ScalewayCR { Ok(()) } - fn login(&self) -> Result<&ContainerRegistryInfo, EngineError> { - Ok(&self.registry_info) + fn registry_info(&self) -> &ContainerRegistryInfo { + &self.registry_info } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/transaction.rs b/src/transaction.rs index a21b434f..bb3af39a 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -120,7 +120,7 @@ impl<'a> Transaction<'a> { // Do setup of registry and be sure we are login to the registry let cr_registry = self.engine.container_registry(); let _ = cr_registry.create_registry()?; - let registry = self.engine.container_registry().login()?; + let registry = self.engine.container_registry().registry_info(); for app in apps_to_build.into_iter() { let app_build = app.to_build(®istry); @@ -186,11 +186,7 @@ impl<'a> Transaction<'a> { // Warning: This function function does not revert anything, it just there to grab info from kube and services if it fails // FIXME: Cleanup this, qe_environment should not be rebuilt at this step fn rollback_environment(&self, environment_action: &EnvironmentAction) -> Result<(), RollbackError> { - let 
registry_info = self - .engine - .container_registry() - .login() - .map_err(|err| RollbackError::CommitError(err))?; + let registry_info = self.engine.container_registry().registry_info(); let qe_environment = |environment: &Environment| { let qe_environment = environment.to_qe_environment( @@ -424,7 +420,7 @@ impl<'a> Transaction<'a> { EnvironmentAction::Environment(te) => te, }; - let registry_info = self.engine.container_registry().login().unwrap(); + let registry_info = self.engine.container_registry().registry_info(); let qe_environment = target_environment.to_qe_environment( self.engine.context(), self.engine.cloud_provider(), diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 801d3627..8799229b 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -242,7 +242,7 @@ pub fn clean_environments( .unwrap(); // delete images created in registry - let registry_url = container_registry_client.login()?; + let registry_url = container_registry_client.registry_info(); for env in environments.iter() { for image in env .applications From 314348335feb212d82a924031addfb1ae4d85311 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 11:59:39 +0100 Subject: [PATCH 30/85] Use correct secret name in deployment for digital ocean registry --- src/cloud_provider/digitalocean/application.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index bcb3454b..081cd654 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -240,7 +240,10 @@ impl Service for Application { context.insert("environment_variables", &environment_variables); context.insert("ports", &self.ports); context.insert("is_registry_secret", &true); - context.insert("registry_secret", self.image.registry_host()); + + // This is specific to digital ocean as it is them 
that create the registry secret + // we don't have the hand on it + context.insert("registry_secret", &self.image.registry_name); let storage = self .storage From 60e4865d6a701df9e3388514d8d096150b02d077 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 13:43:29 +0100 Subject: [PATCH 31/85] Avoid creating multiple docker handler --- src/build_platform/local_docker.rs | 7 ++----- src/cmd/docker.rs | 1 + src/container_registry/docr.rs | 5 +---- src/container_registry/ecr.rs | 8 ++++---- src/container_registry/scaleway_container_registry.rs | 9 ++------- src/models.rs | 6 +++++- test_utilities/src/utilities.rs | 3 +++ 7 files changed, 18 insertions(+), 21 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 4277ea84..aa747159 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -10,7 +10,7 @@ use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentia use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; -use crate::cmd::docker::{ContainerImage, Docker, DockerError}; +use crate::cmd::docker::{ContainerImage, DockerError}; use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; @@ -33,7 +33,6 @@ const BUILDPACKS_BUILDERS: [&str; 1] = [ /// use Docker in local pub struct LocalDocker { context: Context, - docker: Docker, id: String, name: String, listeners: Listeners, @@ -47,10 +46,8 @@ impl LocalDocker { name: &str, logger: Box, ) -> Result> { - let docker = Docker::new_with_options(true, context.docker_tcp_socket().clone())?; Ok(LocalDocker { context, - docker, id: id.to_string(), name: name.to_string(), listeners: vec![], @@ -124,7 +121,7 @@ impl LocalDocker { }) .collect::>(); - let exit_status = self.docker.build( + let exit_status = 
self.context.docker.build( &Path::new(dockerfile_complete_path), &Path::new(into_dir_docker_style), &image_to_build, diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index d5dd98ef..ccda8881 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -49,6 +49,7 @@ impl ContainerImage { } } +#[derive(Debug, Clone)] pub struct Docker { use_buildkit: bool, common_envs: Vec<(String, String)>, diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 4b70092a..cd686f30 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -6,7 +6,6 @@ use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; -use crate::cmd::docker::{to_engine_error, Docker}; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, EngineError, Kind}; use crate::errors::CommandError; use crate::events::{EngineEvent, EventDetails, ToTransmitter, Transmitter}; @@ -62,9 +61,7 @@ impl DOCR { }; let event_details = cr.get_event_details(); - let docker = - Docker::new(cr.context.docker_tcp_socket().clone()).map_err(|err| to_engine_error(&event_details, err))?; - if docker.login(&cr.registry_info.endpoint).is_err() { + if cr.context.docker.login(&cr.registry_info.endpoint).is_err() { return Err(EngineError::new_client_invalid_cloud_provider_credentials( event_details, )); diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index f9906e4b..63d9e307 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -10,7 +10,7 @@ use rusoto_ecr::{ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient}; use crate::build_platform::Image; -use crate::cmd::docker::{to_engine_error, Docker}; +use crate::cmd::docker::to_engine_error; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; @@ -58,13 +58,13 @@ impl ECR { 
}; let credentials = cr.get_credentials()?; - let docker = Docker::new(cr.context.docker_tcp_socket().clone()) - .map_err(|err| to_engine_error(&cr.get_event_details(), err))?; let mut registry_url = Url::parse(credentials.endpoint_url.as_str()).unwrap(); let _ = registry_url.set_username(&credentials.access_token); let _ = registry_url.set_password(Some(&credentials.password)); - let _ = docker + let _ = cr + .context + .docker .login(®istry_url) .map_err(|err| to_engine_error(&cr.get_event_details(), err))?; diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index ad124b4d..c2df013d 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -6,7 +6,6 @@ use std::borrow::Borrow; use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; use crate::cmd::docker; -use crate::cmd::docker::{to_engine_error, Docker}; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, Stage, ToTransmitter, Transmitter}; @@ -23,7 +22,6 @@ pub struct ScalewayCR { secret_token: String, zone: ScwZone, registry_info: ContainerRegistryInfo, - docker: Docker, listeners: Listeners, logger: Box, } @@ -56,9 +54,7 @@ impl ScalewayCR { let _ = registry.set_username(&login); let _ = registry.set_password(Some(&secret_token)); - let docker = - Docker::new(context.docker_tcp_socket().clone()).map_err(|err| to_engine_error(&event_details, err))?; - if docker.login(®istry).is_err() { + if context.docker.login(®istry).is_err() { return Err(EngineError::new_client_invalid_cloud_provider_credentials( event_details, )); @@ -83,7 +79,6 @@ impl ScalewayCR { secret_token, zone, registry_info, - docker, listeners: Vec::new(), logger, }; @@ -387,7 +382,7 @@ impl 
ContainerRegistry for ScalewayCR { name: image.name().clone(), tags: vec![image.tag.clone()], }; - self.docker.does_image_exist_remotely(&image).is_ok() + self.context.docker.does_image_exist_remotely(&image).is_ok() } fn logger(&self) -> &dyn Logger { diff --git a/src/models.rs b/src/models.rs index 128634f2..30bc4881 100644 --- a/src/models.rs +++ b/src/models.rs @@ -21,6 +21,7 @@ use crate::cloud_provider::service::{DatabaseOptions, StatefulService, Stateless use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; +use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::utilities::get_image_tag; @@ -1119,7 +1120,7 @@ impl<'a> ListenersHelper<'a> { } } -#[derive(PartialEq, Eq, Hash, Clone)] +#[derive(Clone)] pub struct Context { organization_id: String, cluster_id: String, @@ -1130,6 +1131,7 @@ pub struct Context { docker_host: Option, features: Vec, metadata: Option, + pub docker: Docker, } #[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)] @@ -1170,6 +1172,7 @@ impl Context { docker_host: Option, features: Vec, metadata: Option, + docker: Docker, ) -> Self { Context { organization_id, @@ -1181,6 +1184,7 @@ impl Context { docker_host, features, metadata, + docker, } } diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 7c8ab263..c004bc8e 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -46,6 +46,7 @@ use crate::digitalocean::{ }; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cmd::command::QoveryCommand; +use qovery_engine::cmd::docker::Docker; use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc}; use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC}; use qovery_engine::errors::CommandError; @@ -62,6 +63,7 @@ pub fn context(organization_id: &str, 
cluster_id: &str) -> Context { let execution_id = execution_id(); let home_dir = std::env::var("WORKSPACE_ROOT_DIR").unwrap_or(home_dir().unwrap().to_str().unwrap().to_string()); let lib_root_dir = std::env::var("LIB_ROOT_DIR").expect("LIB_ROOT_DIR is mandatory"); + let docker = Docker::new(None).expect("Can't init docker"); let metadata = Metadata { dry_run_deploy: Option::from({ @@ -101,6 +103,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { None, enabled_features, Option::from(metadata), + docker, ) } From e3e5585e7017f84ce5caa989cdde125a65c6a640 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 13:51:34 +0100 Subject: [PATCH 32/85] Fix tests --- src/cloud_provider/aws/databases/mongodb.rs | 50 +------------------ src/cloud_provider/aws/databases/mysql.rs | 50 +------------------ .../aws/databases/postgresql.rs | 50 +------------------ src/cloud_provider/aws/databases/redis.rs | 50 +------------------ 4 files changed, 4 insertions(+), 196 deletions(-) diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index e35bcd48..9ff964b9 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -439,10 +439,7 @@ fn get_managed_mongodb_version(requested_version: String) -> Result Result Result Result Date: Fri, 18 Mar 2022 15:17:47 +0100 Subject: [PATCH 33/85] fix: DO kubeconfig (#642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: DO kubeconfig * fix: fix linter * fix: fix docker login Co-authored-by: Erèbe - Romain Gerard --- Cargo.lock | 1 + .../digitalocean/kubernetes/doks_api.rs | 30 +++ .../digitalocean/kubernetes/mod.rs | 175 ++++++++++++++---- .../digitalocean/models/do_api.rs | 0 .../digitalocean/models/doks.rs | 4 +- src/container_registry/docr.rs | 8 +- src/errors/io.rs | 2 + src/errors/mod.rs | 23 ++- test_utilities/Cargo.lock | 1 + test_utilities/Cargo.toml | 1 + 
test_utilities/src/utilities.rs | 107 +++++------ 11 files changed, 247 insertions(+), 105 deletions(-) create mode 100644 src/cloud_provider/digitalocean/models/do_api.rs diff --git a/Cargo.lock b/Cargo.lock index aefb2a9f..e5e49ce1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3282,6 +3282,7 @@ dependencies = [ "serde_derive", "serde_json", "time 0.2.27", + "tokio 1.10.0", "tracing", "tracing-subscriber", "url 2.2.2", diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index e07688f6..28319e1d 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -80,6 +80,36 @@ fn get_do_kubernetes_latest_slug_version( ))) } +pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Result, CommandError> { + let clusters_url = format!("{}/clusters", DoApiType::Doks.api_url()); + let clusters_response = do_get_from_api(token, DoApiType::Doks, clusters_url); + let clusters: Result = match clusters_response { + Ok(clusters_response) => match serde_json::from_str(clusters_response.as_str()) { + Ok(clusters) => Ok(clusters), + Err(e) => Err(CommandError::new_from_safe_message(e.to_string())), + }, + Err(e) => Err(CommandError::new_from_safe_message(e.message())), + }; + + let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters.clone(); + match clusters_copy + .into_iter() + .filter(|cluster| cluster.name == cluster_name.to_string()) + .collect::>() + .first() + .clone() + { + Some(cluster) => { + let kubeconfig_url = format!("{}/clusters/{}/kubeconfig", DoApiType::Doks.api_url(), cluster.id); + match do_get_from_api(token, DoApiType::Doks, kubeconfig_url) { + Ok(kubeconfig) => Ok(Some(kubeconfig)), + Err(e) => Err(CommandError::new_from_safe_message(e.message())), + } + } + None => Ok(None), + } +} + #[cfg(test)] mod tests_doks { use 
crate::cloud_provider::digitalocean::kubernetes::doks_api::{ diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index a807cdb0..fb12669e 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -1,5 +1,6 @@ use std::borrow::Borrow; use std::env; +use std::fs::File; use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; @@ -8,7 +9,7 @@ use crate::cloud_provider::aws::regions::AwsZones; use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::kubernetes::doks_api::{ - get_do_latest_doks_slug_from_api, get_doks_info_from_name, + get_do_kubeconfig_by_cluster_name, get_do_latest_doks_slug_from_api, get_doks_info_from_name, }; use crate::cloud_provider::digitalocean::kubernetes::helm_charts::{do_helm_charts, ChartsConfigPrerequisites}; use crate::cloud_provider::digitalocean::kubernetes::node::DoInstancesType; @@ -36,14 +37,17 @@ use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_mana use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; -use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; +use crate::events::{ + EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, +}; use crate::logger::{LogLevel, Logger}; use crate::models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, - ProgressScope, QoveryIdentifier, ToHelmString, + ProgressScope, QoveryIdentifier, StringPath, ToHelmString, }; use crate::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use crate::object_storage::ObjectStorage; +use crate::runtime::block_on; 
use crate::string::terraform_list_format; use crate::{cmd, dns_provider}; use ::function_name::named; @@ -53,6 +57,7 @@ use retry::OperationResult; use std::path::Path; use std::str::FromStr; use std::sync::Arc; +use tokio::io::AsyncWriteExt; pub mod cidr; pub mod doks_api; @@ -613,25 +618,6 @@ impl DOKS { ), }; - // Kubeconfig bucket - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), - ), - ); - if let Err(e) = self.spaces.create_bucket(self.kubeconfig_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), - self.kubeconfig_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - return Err(error); - } - // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { let error = EngineError::new_object_storage_cannot_create_bucket_error( @@ -652,25 +638,8 @@ impl DOKS { )); } - // push config file to object storage let kubeconfig_path = &self.get_kubeconfig_file_path()?; let kubeconfig_path = Path::new(kubeconfig_path); - let kubeconfig_name = format!("{}.yaml", self.id()); - if let Err(e) = self.spaces.put( - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str(), - kubeconfig_path.to_str().expect("No path for Kubeconfig"), - ) { - let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( - event_details.clone(), - self.logs_bucket_name(), - kubeconfig_name.to_string(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - return Err(error); - } match self.check_workers_on_create() { Ok(_) => { @@ -1715,6 +1684,134 @@ impl Kubernetes for DOKS { ); Ok(()) } + + fn 
get_kubeconfig_file_path(&self) -> Result { + let (path, _) = self.get_kubeconfig_file()?; + Ok(path) + } + + fn get_kubeconfig_file(&self) -> Result<(String, File), EngineError> { + let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); + let bucket_name = format!("qovery-kubeconfigs-{}", self.id()); + let object_key = self.get_kubeconfig_filename(); + let stage = Stage::General(GeneralStep::RetrieveClusterConfig); + + // check if kubeconfig locally exists + let local_kubeconfig = match self.get_temp_dir(event_details.clone()) { + Ok(x) => { + let local_kubeconfig_folder_path = format!("{}/{}", &x, &bucket_name); + let local_kubeconfig_generated = format!("{}/{}", &local_kubeconfig_folder_path, &object_key); + if Path::new(&local_kubeconfig_generated).exists() { + match File::open(&local_kubeconfig_generated) { + Ok(_) => Some(local_kubeconfig_generated), + Err(err) => { + self.logger().log( + LogLevel::Debug, + EngineEvent::Debug( + self.get_event_details(stage.clone()), + EventMessage::new( + err.to_string(), + Some( + format!("Error, couldn't open {} file", &local_kubeconfig_generated,) + .to_string(), + ), + ), + ), + ); + None + } + } + } else { + None + } + } + Err(_) => None, + }; + + // otherwise, try to get it from digital ocean api + let result = match local_kubeconfig { + Some(local_kubeconfig_generated) => match File::open(&local_kubeconfig_generated) { + Ok(file) => Ok((StringPath::from(&local_kubeconfig_generated), file)), + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + None => { + let kubeconfig = match get_do_kubeconfig_by_cluster_name(self.cloud_provider.token(), self.name()) { + Ok(kubeconfig) => Ok(kubeconfig), + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.message(), Some(e.message())), + )), + } + .expect("Unable to 
get kubeconfig"); + + let workspace_directory = crate::fs::workspace_directory( + self.context().workspace_root_dir(), + self.context().execution_id(), + format!("object-storage/scaleway_os/{}", self.name()), + ) + .map_err(|err| { + EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(err.to_string(), Some(err.to_string())), + ) + }) + .expect("Unable to create directory"); + + let file_path = format!( + "{}/{}/{}", + workspace_directory, + format!("qovery-kubeconfigs-{}", self.id()), + format!("{}.yaml", self.id()) + ); + let path = Path::new(file_path.as_str()); + let parent_dir = path.parent().unwrap(); + let _ = block_on(tokio::fs::create_dir_all(parent_dir)); + + match block_on( + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path), + ) { + Ok(mut created_file) => match kubeconfig.is_some() { + false => Err(EngineError::new_cannot_create_file( + event_details.clone(), + CommandError::new( + "No kubeconfig found".to_string(), + Some("No kubeconfig found".to_string()), + ), + )), + true => match block_on(created_file.write_all(kubeconfig.unwrap().as_bytes())) { + Ok(_) => { + let file = File::open(path).unwrap(); + Ok((file_path, file)) + } + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + }, + Err(e) => Err(EngineError::new_cannot_create_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + } + } + }; + + match result { + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.message(), Some(e.message())), + )), + Ok((file_path, file)) => Ok((file_path, file)), + } + } } impl Listen for DOKS { diff --git a/src/cloud_provider/digitalocean/models/do_api.rs b/src/cloud_provider/digitalocean/models/do_api.rs new file mode 100644 index 00000000..e69de29b diff --git 
a/src/cloud_provider/digitalocean/models/doks.rs b/src/cloud_provider/digitalocean/models/doks.rs index 0fa04c8e..e7a9394d 100644 --- a/src/cloud_provider/digitalocean/models/doks.rs +++ b/src/cloud_provider/digitalocean/models/doks.rs @@ -1,11 +1,11 @@ use serde::{Deserialize, Serialize}; -#[derive(Default, Serialize, Deserialize, PartialEq, Debug)] +#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct DoksList { pub kubernetes_clusters: Vec, } -#[derive(Default, Serialize, Deserialize, PartialEq, Debug)] +#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct KubernetesCluster { pub id: String, pub name: String, diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index cd686f30..3f904fe1 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -26,7 +26,7 @@ pub struct DOCR { pub name: String, pub api_key: String, pub id: String, - pub registry_info: ContainerRegistryInfo, + pub registry_info: Option, pub listeners: Listeners, pub logger: Box, } @@ -55,12 +55,13 @@ impl DOCR { name: name.to_string(), api_key: api_key.into(), id: id.into(), - registry_info, listeners: vec![], logger, + registry_info: Some(registry_info), }; let event_details = cr.get_event_details(); + if cr.context.docker.login(&cr.registry_info.endpoint).is_err() { return Err(EngineError::new_client_invalid_cloud_provider_credentials( event_details, @@ -222,7 +223,8 @@ impl ContainerRegistry for DOCR { } fn registry_info(&self) -> &ContainerRegistryInfo { - &self.registry_info + // At this point the registry info should be initialize, so unwrap is safe + self.registry_info.as_ref().unwrap() } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/errors/io.rs b/src/errors/io.rs index 17c63858..de78d756 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -28,6 +28,7 @@ pub enum Tag { CannotGetWorkspaceDirectory, UnsupportedInstanceType, CannotRetrieveClusterConfigFile, + 
CannotCreateFile, CannotGetClusterNodes, NotEnoughResourcesToDeployEnvironment, CannotUninstallHelmChart, @@ -116,6 +117,7 @@ impl From for Tag { errors::Tag::Unknown => Tag::Unknown, errors::Tag::UnsupportedInstanceType => Tag::UnsupportedInstanceType, errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile, + errors::Tag::CannotCreateFile => Tag::CannotCreateFile, errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes, errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment, errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable, diff --git a/src/errors/mod.rs b/src/errors/mod.rs index b6531c75..91c0fa2a 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -120,6 +120,8 @@ pub enum Tag { UnsupportedZone, /// CannotRetrieveKubernetesConfigFile: represents an error while trying to retrieve Kubernetes config file. CannotRetrieveClusterConfigFile, + /// CannotCreateFile: represents an error while trying to create a file. + CannotCreateFile, /// CannotGetClusterNodes: represents an error while trying to get cluster's nodes. CannotGetClusterNodes, /// NotEnoughResourcesToDeployEnvironment: represents an error when trying to deploy an environment but there are not enough resources available on the cluster. @@ -626,7 +628,7 @@ impl EngineError { event_details: EventDetails, error_message: CommandError, ) -> EngineError { - let message = "Cannot retrieve Kubernetes instance type is not supported"; + let message = "Cannot retrieve Kubernetes kubeconfig"; EngineError::new( event_details, Tag::CannotRetrieveClusterConfigFile, @@ -638,6 +640,25 @@ impl EngineError { ) } + /// Creates new error for file we can't create. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error_message`: Raw error message. 
+ pub fn new_cannot_create_file(event_details: EventDetails, error_message: CommandError) -> EngineError { + let message = "Cannot create file"; + EngineError::new( + event_details, + Tag::CannotCreateFile, + message.to_string(), + message.to_string(), + Some(error_message), + None, + None, + ) + } + /// Creates new error for Kubernetes cannot get nodes. /// /// Arguments: diff --git a/test_utilities/Cargo.lock b/test_utilities/Cargo.lock index 46e83ffb..e737331b 100644 --- a/test_utilities/Cargo.lock +++ b/test_utilities/Cargo.lock @@ -3320,6 +3320,7 @@ dependencies = [ "serde_derive", "serde_json", "time 0.2.24", + "tokio 1.10.0", "tracing", "tracing-subscriber", "url 2.2.2", diff --git a/test_utilities/Cargo.toml b/test_utilities/Cargo.toml index 01e81fbd..d74d5875 100644 --- a/test_utilities/Cargo.toml +++ b/test_utilities/Cargo.toml @@ -29,6 +29,7 @@ maplit = "1.0.2" uuid = { version = "0.8", features = ["v4"] } const_format = "0.2.22" url = "2.2.2" +tokio = { version = "1.10.0", features = ["full"] } # Digital Ocean Deps digitalocean = "0.1.1" diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index c004bc8e..df9a94e5 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -9,17 +9,19 @@ use curl::easy::Easy; use dirs::home_dir; use gethostname; use std::collections::BTreeMap; -use std::io::{Error, ErrorKind, Read, Write}; +use std::io::{Error, ErrorKind, Write}; use std::path::Path; use std::str::FromStr; use passwords::PasswordGenerator; +use qovery_engine::cloud_provider::digitalocean::kubernetes::doks_api::get_do_kubeconfig_by_cluster_name; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use retry::delay::Fibonacci; use retry::OperationResult; use std::env; use std::fs; +use tokio::io::AsyncWriteExt; use tracing::{info, warn}; use crate::scaleway::{ @@ -44,7 +46,6 @@ use crate::digitalocean::{ DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, 
DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, }; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cmd::command::QoveryCommand; use qovery_engine::cmd::docker::Docker; use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc}; @@ -52,8 +53,6 @@ use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC}; use qovery_engine::errors::CommandError; use qovery_engine::logger::{Logger, StdIoLogger}; use qovery_engine::models::DatabaseMode::MANAGED; -use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces}; -use qovery_engine::object_storage::ObjectStorage; use qovery_engine::runtime::block_on; use time::Instant; @@ -536,64 +535,52 @@ where ) } Kind::Do => { - let region_raw = secrets - .DIGITAL_OCEAN_DEFAULT_REGION - .as_ref() - .expect(&"DIGITAL_OCEAN_DEFAULT_REGION should be set".to_string()) - .to_string(); + let cluster_name = format!("qovery-{}", context.cluster_id()); + let kubeconfig = match get_do_kubeconfig_by_cluster_name( + secrets.clone().DIGITAL_OCEAN_TOKEN.unwrap().as_str(), + cluster_name.clone().as_str(), + ) { + Ok(kubeconfig) => Ok(kubeconfig), + Err(e) => Err(CommandError::new(e.message(), Some(e.message()))), + } + .expect("Unable to get kubeconfig"); - match DoRegion::from_str(region_raw.as_str()) { - Ok(region) => { - let spaces = Spaces::new( - context.clone(), - "fake".to_string(), - "fake".to_string(), - secrets - .DIGITAL_OCEAN_SPACES_ACCESS_ID - .as_ref() - .expect(&"DIGITAL_OCEAN_SPACES_ACCESS_ID should be set".to_string()) - .to_string(), - secrets - .DIGITAL_OCEAN_SPACES_SECRET_ID - .as_ref() - .expect(&"DIGITAL_OCEAN_SPACES_SECRET_ID should be set".to_string()) - .to_string(), - region, - BucketDeleteStrategy::HardDelete, - ); + let workspace_directory = qovery_engine::fs::workspace_directory( + context.workspace_root_dir(), + context.execution_id(), + format!("object-storage/scaleway_os/{}", cluster_name.clone()), + ) + 
.map_err(|err| CommandError::new(err.to_string(), Some(err.to_string()))) + .expect("Unable to create directory"); - match spaces.get( - kubernetes_config_bucket_name.as_str(), - kubernetes_config_object_key.as_str(), - false, - ) { - Ok((_, mut file)) => { - let mut content = String::new(); - match file.read_to_string(&mut content) { - Ok(_) => Ok(content), - Err(e) => { - let message_safe = "Error while trying to read file"; - Err(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e), - Some(message_safe.to_string()), - )) - } - } - } - Err(e) => { - let message_safe = "Error while trying to get kubeconfig from spaces"; - Err(CommandError::new( - format!( - "{}, error: {}", - message_safe.to_string(), - e.message.unwrap_or("no error message".to_string()) - ), - Some(message_safe.to_string()), - )) - } - } - } - Err(e) => Err(e), + let file_path = format!( + "{}/{}/{}", + workspace_directory, + format!("qovery-kubeconfigs-{}", context.cluster_id()), + format!("{}.yaml", context.cluster_id()) + ); + let path = Path::new(file_path.as_str()); + let parent_dir = path.parent().unwrap(); + let _ = block_on(tokio::fs::create_dir_all(parent_dir)); + + match block_on( + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path), + ) { + Ok(mut created_file) => match kubeconfig.is_some() { + false => Err(CommandError::new( + "No kubeconfig found".to_string(), + Some("No kubeconfig found".to_string()), + )), + true => match block_on(created_file.write_all(kubeconfig.unwrap().as_bytes())) { + Ok(_) => Ok(file_path), + Err(e) => Err(CommandError::new(e.to_string(), Some(e.to_string()))), + }, + }, + Err(e) => Err(CommandError::new(e.to_string(), Some(e.to_string()))), } } Kind::Scw => { From 6d98894d9ce4af3aa24b661b9d82b5f0478d5323 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 18 Mar 2022 15:19:04 +0100 Subject: [PATCH 34/85] Fix bad merge --- src/cloud_provider/scaleway/router.rs | 8 ++++---- 
src/container_registry/docr.rs | 7 +++---- src/transaction.rs | 5 ----- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 6db22aca..8449a5be 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -194,13 +194,13 @@ impl Service for Router { Ok(context) } - fn selector(&self) -> Option { - Some(format!("routerId={}", self.id)) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("routerId={}", self.id)) + } } impl crate::cloud_provider::service::Router for Router { diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 3f904fe1..d4bf8e61 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -26,7 +26,7 @@ pub struct DOCR { pub name: String, pub api_key: String, pub id: String, - pub registry_info: Option, + pub registry_info: ContainerRegistryInfo, pub listeners: Listeners, pub logger: Box, } @@ -57,7 +57,7 @@ impl DOCR { id: id.into(), listeners: vec![], logger, - registry_info: Some(registry_info), + registry_info, }; let event_details = cr.get_event_details(); @@ -223,8 +223,7 @@ impl ContainerRegistry for DOCR { } fn registry_info(&self) -> &ContainerRegistryInfo { - // At this point the registry info should be initialize, so unwrap is safe - self.registry_info.as_ref().unwrap() + &self.registry_info } fn create_registry(&self) -> Result<(), EngineError> { diff --git a/src/transaction.rs b/src/transaction.rs index bb3af39a..a8a6b578 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -382,11 +382,6 @@ impl<'a> Transaction<'a> { let execution_id = self.engine.context().execution_id(); let lh = ListenersHelper::new(self.engine.kubernetes().listeners()); - // 100 ms sleep to avoid race condition on last service status update - // Otherwise, the last status sent to the CORE is (sometimes) not the right one. 
- // Even by storing data at the micro seconds precision - thread::sleep(std::time::Duration::from_millis(100)); - match result { Err(err) => { warn!("infrastructure ROLLBACK STARTED! an error occurred {:?}", err); From 2776cdd87a4bf998f24181d4c1a3b9e79daf7572 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 18 Mar 2022 19:48:48 +0100 Subject: [PATCH 35/85] Fix buildpack + cleanup for image hash (#651) --- src/build_platform/local_docker.rs | 159 ++++++++++++++--------------- src/build_platform/mod.rs | 7 +- src/cmd/docker.rs | 32 ++++++ src/git.rs | 40 +++++--- src/models.rs | 26 ++++- 5 files changed, 161 insertions(+), 103 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index aa747159..04e4e8a0 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -1,5 +1,5 @@ use std::io::{Error, ErrorKind}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::time::Duration; use std::{env, fs}; @@ -10,7 +10,7 @@ use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentia use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; -use crate::cmd::docker::{ContainerImage, DockerError}; +use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; @@ -56,7 +56,11 @@ impl LocalDocker { } fn get_docker_host_envs(&self) -> Vec<(&str, &str)> { - vec![] + if let Some(socket_path) = self.context.docker_tcp_socket() { + vec![("DOCKER_HOST", socket_path.as_str())] + } else { + vec![] + } } /// Read Dockerfile content from location path and return an array of bytes @@ -384,33 +388,34 @@ impl BuildPlatform for LocalDocker { fn build(&self, build: Build, is_task_canceled: &dyn Fn() -> 
bool) -> Result { let event_details = self.get_event_details(); + let listeners_helper = ListenersHelper::new(&self.listeners); + let app_id = build.image.application_id.clone(); - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("LocalDocker.build() called".to_string()), - ), - ); - + // check if we should already abort the task if is_task_canceled() { return Err(EngineError::new_task_cancellation_requested(event_details.clone())); } - let listeners_helper = ListenersHelper::new(&self.listeners); - let repository_root_path = self.get_repository_build_root_path(&build)?; - + // LOGGING + let repository_root_path = PathBuf::from(self.get_repository_build_root_path(&build)?); + let msg = format!( + "Cloning repository: {} to {:?}", + build.git_repository.url, repository_root_path + ); + listeners_helper.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { id: app_id }, + ProgressLevel::Info, + Some(msg.clone()), + self.context.execution_id(), + )); self.logger.log( LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Cloning repository: {} to {}", - build.git_repository.url, repository_root_path - )), - ), + EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg)), ); + // LOGGING + // Create callback that will be called by git to provide credentials per user + // If people use submodule, they need to provide us their ssh key let get_credentials = |user: &str| { let mut creds: Vec<(CredentialType, Cred)> = Vec::with_capacity(build.git_repository.ssh_keys.len() + 1); for ssh_key in build.git_repository.ssh_keys.iter() { @@ -431,17 +436,13 @@ impl BuildPlatform for LocalDocker { creds }; - if Path::new(repository_root_path.as_str()).exists() { - // remove folder before cloning it again - // FIXME: reuse this folder and checkout the right commit - let _ = fs::remove_dir_all(repository_root_path.as_str()); - } - - // git 
clone - if is_task_canceled() { - return Err(EngineError::new_task_cancellation_requested(event_details.clone())); + // Cleanup, mono repo can require to clone multiple time the same repo + // FIXME: re-use the same repo and just checkout at the correct commit + if repository_root_path.exists() { + let _ = fs::remove_dir_all(&repository_root_path); } + // Do the real git clone if let Err(clone_error) = git::clone_at_commit( &build.git_repository.url, &build.git_repository.commit_id, @@ -460,6 +461,10 @@ impl BuildPlatform for LocalDocker { return Err(error); } + if is_task_canceled() { + return Err(EngineError::new_task_cancellation_requested(event_details.clone())); + } + let mut disable_build_cache = false; let mut env_var_args: Vec = Vec::with_capacity(build.options.environment_variables.len()); @@ -498,8 +503,8 @@ impl BuildPlatform for LocalDocker { if disk.get_mount_point() == docker_path { let event_details = self.get_event_details(); if let Err(e) = check_docker_space_usage_and_clean( + &self.context.docker, disk, - self.get_docker_host_envs(), event_details.clone(), &*self.logger(), ) { @@ -507,7 +512,7 @@ impl BuildPlatform for LocalDocker { LogLevel::Warning, EngineEvent::Warning( event_details.clone(), - EventMessage::new(e.message_raw(), e.message_safe()), + EventMessage::new(e.to_string(), Some(e.to_string())), ), ); } @@ -518,22 +523,42 @@ impl BuildPlatform for LocalDocker { } let app_id = build.image.application_id.clone(); - let build_context_path = format!("{}/{}/.", repository_root_path.as_str(), build.git_repository.root_path); + + // Check that the build context is correct + let build_context_path = repository_root_path.join(&build.git_repository.root_path); + if !build_context_path.is_dir() { + listeners_helper.error(ProgressInfo::new( + ProgressScope::Application { id: app_id.clone() }, + ProgressLevel::Error, + Some(format!( + "Application build context is not present at location {:?}", + build_context_path + )), + 
self.context.execution_id(), + )); + + let error = EngineError::new_docker_cannot_find_dockerfile( + self.get_event_details(), + build_context_path.to_str().unwrap_or_default().to_string(), + ); + + self.logger + .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + + return Err(error); + } + + // now we have to decide if we use buildpack or docker to build our application + // if dockerfile_path is not present it means we need to use buildpack + // If no Dockerfile specified, we should use BuildPacks - let result = if build.git_repository.dockerfile_path.is_some() { + let result = if let Some(dockerfile_path) = &build.git_repository.dockerfile_path { // build container from the provided Dockerfile - let dockerfile_relative_path = build.git_repository.dockerfile_path.as_ref().unwrap(); - let dockerfile_normalized_path = match dockerfile_relative_path.trim() { - "" | "." | "/" | "/." | "./" | "Dockerfile" => "Dockerfile", - dockerfile_root_path => dockerfile_root_path, - }; - - let dockerfile_relative_path = format!("{}/{}", build.git_repository.root_path, dockerfile_normalized_path); - let dockerfile_absolute_path = format!("{}/{}", repository_root_path.as_str(), dockerfile_relative_path); + let dockerfile_absolute_path = repository_root_path.join(dockerfile_path); // If the dockerfile does not exist, abort - if !Path::new(dockerfile_absolute_path.as_str()).exists() { + if !dockerfile_absolute_path.is_file() { listeners_helper.error(ProgressInfo::new( ProgressScope::Application { id: build.image.application_id.clone(), @@ -541,13 +566,15 @@ impl BuildPlatform for LocalDocker { ProgressLevel::Error, Some(format!( "Dockerfile is not present at location {}", - dockerfile_relative_path + dockerfile_absolute_path.display() )), self.context.execution_id(), )); - let error = - EngineError::new_docker_cannot_find_dockerfile(self.get_event_details(), dockerfile_absolute_path); + let error = EngineError::new_docker_cannot_find_dockerfile( + 
self.get_event_details(), + dockerfile_absolute_path.to_str().unwrap_or_default().to_string(), + ); self.logger .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -557,8 +584,8 @@ impl BuildPlatform for LocalDocker { self.build_image_with_docker( build, - dockerfile_absolute_path.as_str(), - build_context_path.as_str(), + dockerfile_absolute_path.to_str().unwrap_or_default(), + build_context_path.to_str().unwrap_or_default(), env_var_args, &listeners_helper, is_task_canceled, @@ -567,7 +594,7 @@ impl BuildPlatform for LocalDocker { // build container with Buildpacks self.build_image_with_buildpacks( build, - build_context_path.as_str(), + build_context_path.to_str().unwrap_or_default(), env_var_args, !disable_build_cache, &listeners_helper, @@ -626,11 +653,11 @@ impl ToTransmitter for LocalDocker { } fn check_docker_space_usage_and_clean( + docker: &Docker, docker_path_size_info: &Disk, - envs: Vec<(&str, &str)>, event_details: EventDetails, logger: &dyn Logger, -) -> Result<(), CommandError> { +) -> Result<(), DockerError> { let docker_max_disk_percentage_usage_before_purge = 60; // arbitrary percentage that should make the job anytime let available_space = docker_path_size_info.get_available_space(); let docker_percentage_remaining = available_space * 100 / docker_path_size_info.get_total_space(); @@ -647,7 +674,7 @@ fn check_docker_space_usage_and_clean( ), ); - return docker_prune_images(envs); + return docker.prune_images(); }; logger.log( @@ -665,31 +692,3 @@ fn check_docker_space_usage_and_clean( Ok(()) } - -fn docker_prune_images(envs: Vec<(&str, &str)>) -> Result<(), CommandError> { - let all_prunes_commands = vec![ - vec!["container", "prune", "-f"], - vec!["image", "prune", "-a", "-f"], - vec!["builder", "prune", "-a", "-f"], - vec!["volume", "prune", "-f"], - vec!["buildx", "prune", "-a", "-f"], - ]; - - let mut errored_commands = vec![]; - for prune in all_prunes_commands { - let mut cmd = QoveryCommand::new("docker", &prune, &envs); - 
let cmd_killer = CommandKiller::from_timeout(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC)); - if let Err(e) = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &cmd_killer) { - errored_commands.push(format!("{} {:?}", prune[0], e)); - } - } - - if errored_commands.len() > 0 { - return Err(CommandError::new( - errored_commands.join("/ "), - Some("Error while trying to prune images.".to_string()), - )); - } - - Ok(()) -} diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 0b9ef095..49cdf6a1 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -5,6 +5,7 @@ use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::logger::Logger; use crate::models::{Context, Listen, QoveryIdentifier}; use std::fmt::{Display, Formatter, Result as FmtResult}; +use std::path::PathBuf; use url::Url; pub mod docker; @@ -65,12 +66,12 @@ pub struct SshKey { } pub struct GitRepository { - pub url: String, + pub url: Url, pub credentials: Option, pub ssh_keys: Vec, pub commit_id: String, - pub dockerfile_path: Option, - pub root_path: String, + pub dockerfile_path: Option, + pub root_path: PathBuf, pub buildpack_language: Option, } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index ccda8881..6bcfe9f7 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -417,6 +417,38 @@ impl Docker { should_abort, ) } + + pub fn prune_images(&self) -> Result<(), DockerError> { + info!("Docker prune images"); + + let all_prunes_commands = vec![ + vec!["container", "prune", "-f"], + vec!["image", "prune", "-a", "-f"], + vec!["builder", "prune", "-a", "-f"], + vec!["volume", "prune", "-f"], + vec!["buildx", "prune", "-a", "-f"], + ]; + + let mut errored_commands = vec![]; + for prune in all_prunes_commands { + let ret = docker_exec( + &prune, + &self.get_all_envs(&vec![]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); + if let Err(e) = ret { + errored_commands.push(e); + } + } + + if !errored_commands.is_empty() { + 
return Err(errored_commands.remove(0)); + } + + Ok(()) + } } fn docker_exec( diff --git a/src/git.rs b/src/git.rs index 0176fc9d..fbbb124e 100644 --- a/src/git.rs +++ b/src/git.rs @@ -68,17 +68,14 @@ fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, } fn clone

( - repository_url: &str, + repository_url: &Url, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, ) -> Result where P: AsRef, { - let url = Url::parse(repository_url) - .map_err(|err| Error::from_str(format!("Invalid repository url {}: {}", repository_url, err).as_str()))?; - - if url.scheme() != "https" { + if repository_url.scheme() != "https" { return Err(Error::from_str("Repository URL have to start with https://")); } @@ -98,11 +95,11 @@ where let _ = std::fs::remove_dir_all(into_dir.as_ref()); } - repo.clone(url.as_str(), into_dir.as_ref()) + repo.clone(repository_url.as_str(), into_dir.as_ref()) } pub fn clone_at_commit

( - repository_url: &str, + repository_url: &Url, commit_id: &str, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, @@ -140,7 +137,7 @@ where } pub fn get_parent_commit_id

( - repository_url: &str, + repository_url: &Url, commit_id: &str, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, @@ -164,6 +161,7 @@ where mod tests { use crate::git::{checkout, clone, clone_at_commit, get_parent_commit_id}; use git2::{Cred, CredentialType}; + use url::Url; use uuid::Uuid; struct DirectoryForTests { @@ -196,18 +194,26 @@ mod tests { let repo_path = repo_dir.path(); // We only allow https:// at the moment - let repo = clone("git@github.com:Qovery/engine.git", &repo_path, &|_| vec![]); - assert!(matches!(repo, Err(e) if e.message().contains("Invalid repository"))); + let repo = clone( + &Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), + &repo_path, + &|_| vec![], + ); + assert!(matches!(repo, Err(e) if e.message().contains("https://"))); // Repository must be empty - let repo = clone("https://github.com/Qovery/engine-testing.git", &repo_path, &|_| vec![]); + let repo = clone( + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), + &repo_path, + &|_| vec![], + ); assert!(repo.is_ok()); // clone makes sure to empty the directory // Working case { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_clone".to_string()); let repo = clone( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![], ); @@ -224,7 +230,7 @@ mod tests { )] }; let repo = clone( - "https://gitlab.com/qovery/q-core.git", + &Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), clone_dir.path(), &get_credentials, ); @@ -261,7 +267,7 @@ mod tests { fn test_git_checkout() { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_checkout".to_string()); let repo = clone( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![], ) @@ -283,7 +289,7 @@ mod tests { fn test_git_parent_id() { 
let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_parent_id".to_string()); let result = get_parent_commit_id( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "964f02f3a3065bc7f6fb745d679b1ddb21153cc7", clone_dir.path(), &|_| vec![], @@ -299,7 +305,7 @@ mod tests { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_parent_id_not_existing".to_string()); let result = get_parent_commit_id( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "964f02f3a3065bc7f6fb745d679b1ddb21153cc0", clone_dir.path(), &|_| vec![], @@ -333,7 +339,7 @@ mod tests { ] }; let repo = clone_at_commit( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "9a9c1f4373c8128151a9def9ea3d838fa2ed33e8", clone_dir.path(), &get_credentials, diff --git a/src/models.rs b/src/models.rs index 30bc4881..84066c4e 100644 --- a/src/models.rs +++ b/src/models.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; use std::hash::Hash; use std::net::Ipv4Addr; +use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; @@ -427,17 +428,36 @@ impl Application { }); } + // Convert our root path to an relative path to be able to append them correctly + let root_path = if Path::new(&self.root_path).is_absolute() { + PathBuf::from(self.root_path.trim_start_matches('/')) + } else { + PathBuf::from(&self.root_path) + }; + assert!(root_path.is_relative(), "root path is not a relative path"); + + let dockerfile_path = self.dockerfile_path.as_ref().map(|path| { + if Path::new(&path).is_absolute() { + root_path.join(path.trim_start_matches('/')) + } else { + root_path.join(&path) + } + }); + + //FIXME: Return a result the function + let url = Url::parse(&self.git_url).unwrap_or_else(|_| 
Url::parse("https://invalid-git-url.com").unwrap()); + Build { git_repository: GitRepository { - url: self.git_url.clone(), + url, credentials: self.git_credentials.as_ref().map(|credentials| Credentials { login: credentials.login.clone(), password: credentials.access_token.clone(), }), ssh_keys, commit_id: self.commit_id.clone(), - dockerfile_path: self.dockerfile_path.clone(), - root_path: self.root_path.clone(), + dockerfile_path, + root_path, buildpack_language: self.buildpack_language.clone(), }, image: self.to_image(registry_url), From daa5861b23aef4693cae2727ae17955644b4860a Mon Sep 17 00:00:00 2001 From: Bilel Benamira Date: Fri, 18 Mar 2022 22:30:33 +0100 Subject: [PATCH 36/85] refactor: remove unused function call (#647) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: remove unused function call * fix: format issue Co-authored-by: Erèbe - Romain Gerard Co-authored-by: MacLikorne --- src/cloud_provider/aws/application.rs | 4 +- src/cloud_provider/aws/router.rs | 4 +- .../digitalocean/application.rs | 4 +- src/cloud_provider/digitalocean/router.rs | 4 +- src/cloud_provider/scaleway/application.rs | 4 +- src/cloud_provider/scaleway/router.rs | 4 +- src/cloud_provider/service.rs | 65 +------------------ 7 files changed, 14 insertions(+), 75 deletions(-) diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs index cc11139a..9fe7bb24 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/cloud_provider/aws/application.rs @@ -380,7 +380,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } @@ -401,7 +401,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, 
self, true, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } } diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index fd4aada7..bef43303 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -468,7 +468,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ -486,6 +486,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index 081cd654..fb24f5b2 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -384,7 +384,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } @@ -405,7 +405,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, true, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } } diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index 7eb80eb6..ceaa1671 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -484,7 +484,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ 
-502,6 +502,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index 0c9c2abe..81cd52e8 100644 --- a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -396,7 +396,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } @@ -417,7 +417,7 @@ impl Delete for Application { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, true, event_details.clone()) + delete_stateless_service(target, self, event_details.clone()) }) } } diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 8449a5be..93a5d99c 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -422,7 +422,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ -440,6 +440,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index f91b5422..182700ce 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -535,24 +535,18 @@ pub fn scale_down_application( }) } -pub fn delete_router( - target: &DeploymentTarget, - service: &T, - is_error: bool, - event_details: EventDetails, -) -> Result<(), EngineError> +pub 
fn delete_router(target: &DeploymentTarget, service: &T, event_details: EventDetails) -> Result<(), EngineError> where T: Router, { send_progress_on_long_task(service, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, service, is_error, event_details.clone()) + delete_stateless_service(target, service, event_details.clone()) }) } pub fn delete_stateless_service( target: &DeploymentTarget, service: &T, - is_error: bool, event_details: EventDetails, ) -> Result<(), EngineError> where @@ -562,15 +556,6 @@ where let environment = target.environment; let helm_release_name = service.helm_release_name(); - if is_error { - let _ = get_stateless_resource_information( - kubernetes, - environment, - service.selector().unwrap_or("".to_string()).as_str(), - Stage::Environment(EnvironmentStep::Delete), - )?; - } - // clean the resource let _ = helm_uninstall_release( kubernetes, @@ -1252,52 +1237,6 @@ where Ok(result) } -/// show different output (kubectl describe, log..) for debug purpose -pub fn get_stateless_resource_information( - kubernetes: &dyn Kubernetes, - environment: &Environment, - selector: &str, - stage: Stage, -) -> Result<(Describe, Logs), EngineError> { - let event_details = kubernetes.get_event_details(stage); - let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - // exec describe pod... - let describe = crate::cmd::kubectl::kubectl_exec_describe_pod( - kubernetes_config_file_path.to_string(), - environment.namespace(), - selector, - kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| { - EngineError::new_k8s_describe( - event_details.clone(), - selector.to_string(), - environment.namespace().to_string(), - e, - ) - })?; - - // exec logs... 
- let logs = crate::cmd::kubectl::kubectl_exec_logs( - kubernetes_config_file_path.to_string(), - environment.namespace(), - selector, - kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| { - EngineError::new_k8s_get_logs_error( - event_details.clone(), - selector.to_string(), - environment.namespace().to_string(), - e, - ) - })? - .join("\n"); - - Ok((describe, logs)) -} - pub fn helm_uninstall_release( kubernetes: &dyn Kubernetes, environment: &Environment, From db60d2691d6f3b615035145c2808b10f396c8f43 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Mon, 21 Mar 2022 10:59:08 +0100 Subject: [PATCH 37/85] feat: object storage to use new logging (#652) Ticket: ENG-1134 --- .../digitalocean/kubernetes/mod.rs | 4 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 8 +- src/errors/io.rs | 10 + src/errors/mod.rs | 249 +++++++++++++----- src/object_storage/errors.rs | 61 +++++ src/object_storage/mod.rs | 29 +- src/object_storage/s3.rs | 216 +++++---------- src/object_storage/scaleway_object_storage.rs | 227 ++++++---------- src/object_storage/spaces.rs | 171 ++++++------ tests/scaleway/scw_container_registry.rs | 1 + 11 files changed, 515 insertions(+), 469 deletions(-) create mode 100644 src/object_storage/errors.rs diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index fb12669e..c836e18a 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -623,7 +623,7 @@ impl DOKS { let error = EngineError::new_object_storage_cannot_create_bucket_error( event_details.clone(), self.logs_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -1163,7 +1163,7 @@ impl DOKS { ), ), Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", 
chart.name, e); + let message_safe = format!("Error deleting chart `{}`", chart.name); self.logger().log( LogLevel::Error, EngineEvent::Deleting( diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index db0390d4..85c6e422 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -137,13 +137,7 @@ pub trait Kubernetes: Listen { Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( self.get_event_details(stage.clone()), - CommandError::new_from_safe_message( - format!( - "Error getting file from store, error: {}", - err.message.unwrap_or_else(|| "no details.".to_string()) - ) - .to_string(), - ), + err.into(), ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index cfa0bea2..b9eb650b 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -758,7 +758,7 @@ impl Kapsule { let error = EngineError::new_object_storage_cannot_create_bucket_error( event_details.clone(), self.kubeconfig_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -770,7 +770,7 @@ impl Kapsule { let error = EngineError::new_object_storage_cannot_create_bucket_error( event_details.clone(), self.logs_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -798,7 +798,7 @@ impl Kapsule { event_details.clone(), self.logs_bucket_name(), kubeconfig_name.to_string(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -1605,7 +1605,7 @@ impl Kapsule { ), ), 
Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); + let message_safe = format!("Error deleting chart `{}`", chart.name); self.logger().log( LogLevel::Error, EngineEvent::Deleting( diff --git a/src/errors/io.rs b/src/errors/io.rs index de78d756..1caaf02a 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -109,6 +109,10 @@ pub enum Tag { ContainerRegistryRepositoryDoesntExist, ContainerRegistryDeleteRepositoryError, ContainerRegistryDeleteImageError, + ObjectStorageInvalidBucketName, + ObjectStorageCannotEmptyBucket, + ObjectStorageCannotTagBucket, + ObjectStorageCannotActivateBucketVersioning, } impl From for Tag { @@ -210,6 +214,12 @@ impl From for Tag { errors::Tag::ContainerRegistryDeleteRepositoryError => Tag::ContainerRegistryDeleteRepositoryError, errors::Tag::BuilderDockerCannotListImages => Tag::BuilderDockerCannotListImages, errors::Tag::DockerError => Tag::DockerError, + errors::Tag::ObjectStorageInvalidBucketName => Tag::ObjectStorageInvalidBucketName, + errors::Tag::ObjectStorageCannotEmptyBucket => Tag::ObjectStorageCannotEmptyBucket, + errors::Tag::ObjectStorageCannotTagBucket => Tag::ObjectStorageCannotTagBucket, + errors::Tag::ObjectStorageCannotActivateBucketVersioning => { + Tag::ObjectStorageCannotActivateBucketVersioning + } } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 91c0fa2a..c13758b2 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -9,10 +9,13 @@ use crate::cmd::helm::HelmError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; use crate::models::QoveryIdentifier; +use crate::object_storage::errors::ObjectStorageError; +use std::fmt::{Display, Formatter}; +use thiserror::Error; use url::Url; /// CommandError: command error, mostly returned by third party tools. 
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Error, PartialEq)] pub struct CommandError { /// message: full error message, can contains unsafe text such as passwords and tokens. message_raw: String, @@ -99,6 +102,18 @@ impl CommandError { } } +impl Display for CommandError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(self.message().as_str()) + } +} + +impl From for CommandError { + fn from(object_storage_error: ObjectStorageError) -> Self { + CommandError::new_from_safe_message(object_storage_error.to_string()) + } +} + #[derive(Clone, Debug, PartialEq)] /// Tag: unique identifier for an error. pub enum Tag { @@ -278,6 +293,14 @@ pub enum Tag { ContainerRegistryRepositoryDoesntExist, /// ContainerRegistryDeleteRepositoryError: represents an error while trying to delete a repository. ContainerRegistryDeleteRepositoryError, + /// ObjectStorageInvalidBucketName: represents an error, bucket name is not valid. + ObjectStorageInvalidBucketName, + /// ObjectStorageCannotEmptyBucket: represents an error while trying to empty an object storage bucket. + ObjectStorageCannotEmptyBucket, + /// ObjectStorageCannotTagBucket: represents an error while trying to tag an object storage bucket. + ObjectStorageCannotTagBucket, + /// ObjectStorageCannotActivateBucketVersioning: represents an error while trying to activate bucket versioning for bucket. + ObjectStorageCannotActivateBucketVersioning, } #[derive(Clone, Debug)] @@ -634,7 +657,7 @@ impl EngineError { Tag::CannotRetrieveClusterConfigFile, message.to_string(), message.to_string(), - Some(error_message), + Some(error_message.into()), None, None, ) @@ -1852,65 +1875,6 @@ impl EngineError { ) } - /// Creates new object storage cannot create bucket. - /// - /// Arguments: - /// - /// * `event_details`: Error linked event details. - /// * `bucket_name`: Object storage bucket name. - /// * `raw_error`: Raw error message. 
- pub fn new_object_storage_cannot_create_bucket_error( - event_details: EventDetails, - bucket_name: String, - raw_error: CommandError, - ) -> EngineError { - let message = format!( - "Error, cannot create object storage bucket `{}`.", - bucket_name.to_string(), - ); - - EngineError::new( - event_details, - Tag::ObjectStorageCannotCreateBucket, - message.to_string(), - message.to_string(), - Some(raw_error), - None, - None, - ) - } - - /// Creates new object storage cannot put file into bucket. - /// - /// Arguments: - /// - /// * `event_details`: Error linked event details. - /// * `bucket_name`: Object storage bucket name. - /// * `file_name`: File name to be added into the bucket. - /// * `raw_error`: Raw error message. - pub fn new_object_storage_cannot_put_file_into_bucket_error( - event_details: EventDetails, - bucket_name: String, - file_name: String, - raw_error: CommandError, - ) -> EngineError { - let message = format!( - "Error, cannot put file `{}` into object storage bucket `{}`.", - file_name.to_string(), - bucket_name.to_string(), - ); - - EngineError::new( - event_details, - Tag::ObjectStorageCannotPutFileIntoBucket, - message.to_string(), - message.to_string(), - Some(raw_error), - None, - None, - ) - } - /// Creates new error while trying to get cluster. /// /// Arguments: @@ -2691,4 +2655,167 @@ impl EngineError { None, ) } + + /// Creates new error, object storage bucket name is not valid. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Errored bucket name. 
+ pub fn new_object_storage_bucket_name_is_invalid(event_details: EventDetails, bucket_name: String) -> EngineError { + let message = format!("Error: bucket name `{}` is not valid.", bucket_name); + + EngineError::new( + event_details, + Tag::ObjectStorageInvalidBucketName, + message.to_string(), + message.to_string(), + None, + None, + Some("Check your cloud provider documentation to know bucket naming rules.".to_string()), + ) + } + + /// Creates new object storage cannot create bucket. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_create_bucket_error( + event_details: EventDetails, + bucket_name: String, + raw_error: ObjectStorageError, + ) -> EngineError { + let message = format!( + "Error, cannot create object storage bucket `{}`.", + bucket_name.to_string(), + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotCreateBucket, + message.to_string(), + message.to_string(), + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new object storage cannot put file into bucket. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `file_name`: File name to be added into the bucket. + /// * `raw_error`: Raw error message. 
+ pub fn new_object_storage_cannot_put_file_into_bucket_error( + event_details: EventDetails, + bucket_name: String, + file_name: String, + raw_error: ObjectStorageError, + ) -> EngineError { + let message = format!( + "Error, cannot put file `{}` into object storage bucket `{}`.", + file_name.to_string(), + bucket_name.to_string(), + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotPutFileIntoBucket, + message.to_string(), + message.to_string(), + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new object storage cannot empty object storage bucket. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_empty_bucket( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error while trying to empty object storage bucket `{}`.", + bucket_name.to_string(), + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotEmptyBucket, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + + /// Creates new object storage cannot tag bucket error. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_tag_bucket_error( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error while trying to tag object storage bucket `{}`.", + bucket_name.to_string(), + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotTagBucket, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + + /// Creates new object storage cannot activate bucket versioning error. 
+ /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_activate_bucket_versioning_error( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error while trying to activate versioning for object storage bucket `{}`.", + bucket_name.to_string(), + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotActivateBucketVersioning, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } } diff --git a/src/object_storage/errors.rs b/src/object_storage/errors.rs new file mode 100644 index 00000000..3a80ec28 --- /dev/null +++ b/src/object_storage/errors.rs @@ -0,0 +1,61 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum ObjectStorageError { + #[error("Invalid bucket name error for `{bucket_name:?}`: {raw_error_message:?}.")] + InvalidBucketName { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot create bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotCreateBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot delete bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotDeleteBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot empty bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotEmptyBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot tag bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotTagBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot get workspace error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotGetWorkspace { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot create file error for `{bucket_name:?}`: 
{raw_error_message:?}.")] + CannotCreateFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot open file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotOpenFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot read file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotReadFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot get object file `{file_name:?}` error in `{bucket_name:?}`: {raw_error_message:?}.")] + CannotGetObjectFile { + bucket_name: String, + file_name: String, + raw_error_message: String, + }, + #[error("Cannot upload file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotUploadFile { + bucket_name: String, + raw_error_message: String, + }, +} diff --git a/src/object_storage/mod.rs b/src/object_storage/mod.rs index 329c6ae4..baa4de6a 100644 --- a/src/object_storage/mod.rs +++ b/src/object_storage/mod.rs @@ -1,9 +1,10 @@ use serde::{Deserialize, Serialize}; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; use crate::models::{Context, StringPath}; +use crate::object_storage::errors::ObjectStorageError; use std::fs::File; +pub mod errors; pub mod s3; pub mod scaleway_object_storage; pub mod spaces; @@ -16,22 +17,16 @@ pub trait ObjectStorage { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), EngineError>; - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError>; - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError>; - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError>; - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError>; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::ObjectStorage(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, 
message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } + fn is_valid(&self) -> Result<(), ObjectStorageError>; + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError>; + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError>; + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError>; + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError>; } #[derive(Serialize, Deserialize, Clone)] diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 68dbf09e..04786390 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -14,8 +14,8 @@ use rusoto_s3::{ }; use tokio::io; -use crate::error::{EngineError, EngineErrorCause}; use crate::models::{Context, StringPath}; +use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime::block_on; @@ -68,9 +68,12 @@ impl S3 { S3Client::new_with_client(client, region) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } Ok(()) @@ -90,15 +93,8 @@ impl S3 { .is_ok() } - fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to empty S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn 
empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -131,14 +127,10 @@ impl S3 { ..Default::default() }), ) { - let message = format!( - "While trying to empty S3 bucket `{}` region `{}`, cannot delete content: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -163,20 +155,13 @@ impl ObjectStorage for S3 { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ObjectStorageError> { // TODO check valid credentials Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to create S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -192,14 +177,10 @@ impl ObjectStorage for S3 { }), ..Default::default() })) { - let message = format!( - "While trying to create S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } let creation_date: DateTime = Utc::now(); @@ -220,43 +201,25 @@ impl ObjectStorage for S3 { }, ..Default::default() })) { - let message = format!( - "While 
trying to add tags on S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotTagBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } if self.bucket_versioning_activated { - if let Err(e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { + // Not blocking if fails for the ttime being + let _ = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { bucket: bucket_name.to_string(), ..Default::default() - })) { - let message = format!( - "While trying to activate versioning on S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - } + })); } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -270,47 +233,37 @@ impl ObjectStorage for S3 { expected_bucket_owner: None, })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to delete S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> 
Result<(StringPath, File), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/s3/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? 
- match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -344,43 +297,27 @@ impl ObjectStorage for S3 { let file = File::open(path).unwrap(); Ok((file_path, file)) } - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotCreateFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - Err(e) => { - let message = format!( - "While trying to get object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { + let _ = 
S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -390,30 +327,20 @@ impl ObjectStorage for S3 { body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. {}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), expected_bucket_owner: None, ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -424,7 +351,7 @@ mod tests { struct TestCase<'a> { bucket_name_input: &'a str, - expected_output: Result<(), Option>, + expected_output: Result<(), ObjectStorageError>, description: &'a str, } @@ -434,7 +361,10 @@ mod tests { let test_cases: Vec = vec![ TestCase { bucket_name_input: "", - expected_output: Err(Some(String::from("bucket name cannot be empty"))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "".to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }), description: "bucket name is empty", }, TestCase { diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index 07982e9b..49348702 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -3,10 +3,10 @@ use std::fs::File; use std::path::Path; use crate::cloud_provider::scaleway::application::ScwZone; -use crate::error::{EngineError, 
EngineErrorCause}; use crate::models::{Context, StringPath}; use crate::object_storage::{Kind, ObjectStorage}; +use crate::object_storage::errors::ObjectStorageError; use crate::runtime::block_on; use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; use rusoto_credential::StaticProvider; @@ -79,33 +79,31 @@ impl ScalewayOS { format!("https://s3.{}.scw.cloud", self.zone.region().to_string()) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } // From Scaleway doc // Note: The SSL certificate does not support bucket names containing additional dots (.). // You may receive a SSL warning in your browser when accessing a bucket like my.bucket.name.s3.fr-par.scw.cloud // and it is recommended to use dashes (-) instead: my-bucket-name.s3.fr-par.scw.cloud. if bucket_name.contains('.') { - return Err(Some( - "bucket name cannot contain '.' in its name, recommended to use '-' instead".to_string(), - )); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot contain '.' in its name, recommended to use '-' instead" + .to_string(), + }); } Ok(()) } - fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). 
- if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to empty object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -138,14 +136,10 @@ impl ScalewayOS { ..Default::default() }), ) { - let message = format!( - "While trying to empty object-storage bucket `{}` region `{}`, cannot delete content: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -180,20 +174,13 @@ impl ObjectStorage for ScalewayOS { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { - todo!() + fn is_valid(&self) -> Result<(), ObjectStorageError> { + Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). 
- if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to create object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -211,14 +198,10 @@ impl ObjectStorage for ScalewayOS { }), ..Default::default() })) { - let message = format!( - "While trying to create object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } let creation_date: DateTime = Utc::now(); @@ -240,46 +223,29 @@ impl ObjectStorage for ScalewayOS { }, ..Default::default() })) { - let message = format!( - "While trying to add tags on object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotTagBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } if self.bucket_versioning_activated { - if let Err(e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { + if let Err(_e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { bucket: bucket_name.to_string(), ..Default::default() })) { - let message = format!( - "While trying to activate versioning on object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); // TODO(benjaminch): to be investigated, versioning seems to fail - // Err(self.engine_error(EngineErrorCause::Internal, 
message)) + // Not blocking if it fails + // Err(self.engine_error(ObjectStorageErrorCause::Internal, message)) } } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -298,49 +264,41 @@ impl ObjectStorage for ScalewayOS { })) { Ok(_) => Ok(()), Err(e) => { - let message = format!( - "While trying to delete object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } }, BucketDeleteStrategy::Empty => Ok(()), // Do not delete the bucket } } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). 
- if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/scaleway_os/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? - match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -374,43 +332,31 @@ impl ObjectStorage for ScalewayOS { Ok((file_path, file)) } Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } }, Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) + return Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } } - Err(e) => { - let message = format!( - "While trying to get object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - 
self.zone.region_str(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -420,29 +366,19 @@ impl ObjectStorage for ScalewayOS { body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. 
{}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -453,7 +389,7 @@ mod tests { struct TestCase<'a> { bucket_name_input: &'a str, - expected_output: Result<(), Option>, + expected_output: Result<(), ObjectStorageError>, description: &'a str, } @@ -463,14 +399,19 @@ mod tests { let test_cases: Vec = vec![ TestCase { bucket_name_input: "", - expected_output: Err(Some(String::from("bucket name cannot be empty"))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "".to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }), description: "bucket name is empty", }, TestCase { bucket_name_input: "containing.dot", - expected_output: Err(Some(String::from( - "bucket name cannot contain '.' in its name, recommended to use '-' instead", - ))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "containing.dot".to_string(), + raw_error_message: "bucket name cannot contain '.' 
in its name, recommended to use '-' instead" + .to_string(), + }), description: "bucket name contains dot char", }, TestCase { diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index 13f1ee92..d5bfb8be 100644 --- a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -12,8 +12,8 @@ use rusoto_s3::{ use tokio::io; use crate::cloud_provider::digitalocean::application::DoRegion; -use crate::error::{EngineError, EngineErrorCause}; use crate::models::{Context, StringPath}; +use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime; use crate::runtime::block_on; @@ -74,29 +74,27 @@ impl Spaces { S3Client::new_with_client(client, region) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } if bucket_name.contains('.') { - return Err(Some( - "bucket name cannot contain '.' in its name, recommended to use '-' instead".to_string(), - )); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot contain '.' 
in its name, recommended to use '-' instead" + .to_string(), + }); } Ok(()) } - pub fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + pub fn empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -129,12 +127,10 @@ impl Spaces { ..Default::default() }), ) { - let message = format!( - "While trying to delete object-storage bucket `{}`, cannot delete content: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -156,7 +152,7 @@ impl Spaces { bucket_name: T, object_key: S, download_into_file_path: X, - ) -> Result + ) -> Result where T: Into, S: Into, @@ -171,10 +167,12 @@ impl Spaces { let client = Client::new_with(credentials, HttpClient::new().unwrap()); let s3_client = S3Client::new_with_client(client, region.clone()); + let bucket_name: String = bucket_name.into(); + let object_key: String = object_key.into(); let object = s3_client .get_object(GetObjectRequest { - bucket: bucket_name.into(), - key: object_key.into(), + bucket: bucket_name.to_string(), + key: object_key.to_string(), ..Default::default() }) .await; @@ -195,12 +193,22 @@ impl Spaces { match file { Ok(mut created_file) => match io::copy(&mut body, &mut created_file).await { Ok(_) => Ok(File::open(download_into_file_path.as_ref()).unwrap()), - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => 
Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -222,20 +230,13 @@ impl ObjectStorage for Spaces { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ObjectStorageError> { // TODO check valid credentials Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "error while trying to create object-storage bucket `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -250,18 +251,16 @@ impl ObjectStorage for Spaces { bucket: bucket_name.to_string(), ..Default::default() })) { - let message = format!( - "error while trying to create object-storage bucket `{}`: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn delete_bucket(&self, bucket_name: &str) -> 
Result<(), ObjectStorageError> { let s3_client = self.get_s3_client(); // make sure to delete all bucket content before trying to delete the bucket @@ -279,36 +278,38 @@ impl ObjectStorage for Spaces { })) { Ok(_) => Ok(()), Err(e) => { - let message = format!( - "While trying to delete object-storage bucket, name `{}`: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } }, BucketDeleteStrategy::Empty => Ok(()), // Do not delete the bucket }; } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/spaces/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? 
- match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -316,13 +317,7 @@ impl ObjectStorage for Spaces { let result = retry::retry(Fibonacci::from_millis(3000).take(5), || { match runtime::block_on(self.get_object(bucket_name, object_key, file_path.as_str())) { Ok(file) => OperationResult::Ok(file), - Err(err) => { - debug!("{:?}", err); - - warn!("Can't download object '{}/{}'. Let's retry...", bucket_name, object_key); - - OperationResult::Retry(err) - } + Err(err) => OperationResult::Retry(err), } }); @@ -331,28 +326,27 @@ impl ObjectStorage for Spaces { Err(err) => { return match err { Error::Operation { error, .. } => Err(error), - Error::Internal(err) => Err(self.engine_error(EngineErrorCause::Internal, err)), + Error::Internal(err) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: err.to_string(), + }), }; } }; match file { Ok(file) => Ok((file_path, file)), - Err(err) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", err))), + Err(err) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `digitalocean-api-rs` once we'll made the auo-generated lib - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); 
- return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -362,26 +356,19 @@ impl ObjectStorage for Spaces { body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. {}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}`, error: {}", - object_key, bucket_name, e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index dd2fee90..587b56ab 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,6 +1,7 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; +use qovery_engine::build_platform::Image; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use test_utilities::utilities::logger; From 69364c30dca306370beab89d3f0ae53caf449303 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 21 Mar 2022 11:41:51 +0100 Subject: [PATCH 38/85] Remove useless intermediate struct --- src/models.rs | 8 --- src/transaction.rs | 85 ++++++++++-------------- test_utilities/src/common.rs | 28 ++++---- tests/aws/aws_databases.rs | 22 +++--- tests/aws/aws_environment.rs | 56 ++++++++-------- 
tests/aws/aws_whole_enchilada.rs | 3 +- tests/digitalocean/do_databases.rs | 22 +++--- tests/digitalocean/do_environment.rs | 52 +++++++-------- tests/digitalocean/do_whole_enchilada.rs | 3 +- tests/scaleway/scw_container_registry.rs | 1 - tests/scaleway/scw_databases.rs | 22 +++--- tests/scaleway/scw_environment.rs | 54 +++++++-------- tests/scaleway/scw_whole_enchilada.rs | 3 +- 13 files changed, 164 insertions(+), 195 deletions(-) diff --git a/src/models.rs b/src/models.rs index 84066c4e..eff991cc 100644 --- a/src/models.rs +++ b/src/models.rs @@ -77,14 +77,6 @@ impl Display for QoveryIdentifier { } } -#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub enum EnvironmentAction { - Environment(TargetEnvironment), -} - -pub type TargetEnvironment = Environment; -pub type FailoverEnvironment = Environment; - #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct Environment { pub execution_id: String, diff --git a/src/transaction.rs b/src/transaction.rs index a8a6b578..e2fb863b 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -7,8 +7,7 @@ use crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EventMessage}; use crate::logger::{LogLevel, Logger}; use crate::models::{ - Action, Environment, EnvironmentAction, EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, - ProgressScope, + Action, Environment, EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; pub struct Transaction<'a> { @@ -65,9 +64,9 @@ impl<'a> Transaction<'a> { Ok(()) } - pub fn deploy_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { + pub fn deploy_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { self.deploy_environment_with_options( - environment_action, + environment, DeploymentOption { force_build: false, force_push: false, @@ -77,25 +76,25 @@ impl<'a> Transaction<'a> { pub fn deploy_environment_with_options( &mut 
self, - environment_action: &'a EnvironmentAction, + environment: &'a Environment, option: DeploymentOption, ) -> Result<(), EnvironmentError> { // add build step - self.steps.push(Step::BuildEnvironment(environment_action, option)); + self.steps.push(Step::BuildEnvironment(environment, option)); // add deployment step - self.steps.push(Step::DeployEnvironment(environment_action)); + self.steps.push(Step::DeployEnvironment(environment)); Ok(()) } - pub fn pause_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { - self.steps.push(Step::PauseEnvironment(environment_action)); + pub fn pause_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { + self.steps.push(Step::PauseEnvironment(environment)); Ok(()) } - pub fn delete_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { - self.steps.push(Step::DeleteEnvironment(environment_action)); + pub fn delete_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { + self.steps.push(Step::DeleteEnvironment(environment)); Ok(()) } @@ -185,7 +184,7 @@ impl<'a> Transaction<'a> { // Warning: This function function does not revert anything, it just there to grab info from kube and services if it fails // FIXME: Cleanup this, qe_environment should not be rebuilt at this step - fn rollback_environment(&self, environment_action: &EnvironmentAction) -> Result<(), RollbackError> { + fn rollback_environment(&self, environment: &Environment) -> Result<(), RollbackError> { let registry_info = self.engine.container_registry().registry_info(); let qe_environment = |environment: &Environment| { @@ -199,32 +198,28 @@ impl<'a> Transaction<'a> { qe_environment }; - match environment_action { - EnvironmentAction::Environment(te) => { - // revert changes but there is no failover environment - let target_qe_environment = qe_environment(te); + // revert changes but there is no failover 
environment + let target_qe_environment = qe_environment(environment); - let action = match te.action { - Action::Create => self - .engine - .kubernetes() - .deploy_environment_error(&target_qe_environment), - Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), - Action::Delete => self - .engine - .kubernetes() - .delete_environment_error(&target_qe_environment), - Action::Nothing => Ok(()), - }; + let action = match environment.action { + Action::Create => self + .engine + .kubernetes() + .deploy_environment_error(&target_qe_environment), + Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), + Action::Delete => self + .engine + .kubernetes() + .delete_environment_error(&target_qe_environment), + Action::Nothing => Ok(()), + }; - let _ = match action { - Ok(_) => {} - Err(err) => return Err(RollbackError::CommitError(err)), - }; + let _ = match action { + Ok(_) => {} + Err(err) => return Err(RollbackError::CommitError(err)), + }; - Err(RollbackError::NoFailoverEnvironment) - } - } + Err(RollbackError::NoFailoverEnvironment) } pub fn commit(mut self) -> TransactionResult { @@ -264,16 +259,12 @@ impl<'a> Transaction<'a> { } }; } - Step::BuildEnvironment(environment_action, option) => { + Step::BuildEnvironment(target_environment, option) => { if (self.is_transaction_aborted)() { return TransactionResult::Canceled; } // build applications - let target_environment = match environment_action { - EnvironmentAction::Environment(te) => te, - }; - match self.build_and_push_applications(target_environment, &option) { Ok(apps) => apps, Err(engine_err) => { @@ -407,14 +398,10 @@ impl<'a> Transaction<'a> { } } - fn commit_environment(&self, environment_action: &EnvironmentAction, action_fn: F) -> TransactionResult + fn commit_environment(&self, target_environment: &Environment, action_fn: F) -> TransactionResult where F: Fn(&crate::cloud_provider::environment::Environment) -> Result<(), EngineError>, 
{ - let target_environment = match environment_action { - EnvironmentAction::Environment(te) => te, - }; - let registry_info = self.engine.container_registry().registry_info(); let qe_environment = target_environment.to_qe_environment( self.engine.context(), @@ -566,10 +553,10 @@ pub enum Step<'a> { CreateKubernetes, DeleteKubernetes, PauseKubernetes, - BuildEnvironment(&'a EnvironmentAction, DeploymentOption), - DeployEnvironment(&'a EnvironmentAction), - PauseEnvironment(&'a EnvironmentAction), - DeleteEnvironment(&'a EnvironmentAction), + BuildEnvironment(&'a Environment, DeploymentOption), + DeployEnvironment(&'a Environment), + PauseEnvironment(&'a Environment), + DeleteEnvironment(&'a Environment), } impl<'a> Step<'a> { diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 029ec8db..b3631da5 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -6,8 +6,8 @@ use chrono::Utc; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::models::{ - Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, - GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, + Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, Environment, GitCredentials, + Port, Protocol, Route, Router, Storage, StorageType, }; use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; @@ -70,19 +70,19 @@ pub trait Cluster { pub trait Infrastructure { fn deploy_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn pause_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn delete_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, 
logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; @@ -91,13 +91,13 @@ pub trait Infrastructure { impl Infrastructure for Environment { fn deploy_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let _ = tx.deploy_environment_with_options( - &environment_action, + &environment, DeploymentOption { force_build: true, force_push: true, @@ -109,24 +109,24 @@ impl Infrastructure for Environment { fn pause_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.pause_environment(&environment_action); + let _ = tx.pause_environment(&environment); tx.commit() } fn delete_environment( &self, - environment_action: &EnvironmentAction, + environment: &Environment, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.delete_environment(&environment_action); + let _ = tx.delete_environment(&environment); tx.commit() } @@ -1061,8 +1061,8 @@ pub fn test_db( let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let (localisation, kubernetes_version) = match provider_kind { Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), @@ -1369,7 +1369,7 @@ pub fn cluster_test( minor_boot_version: u8, cluster_domain: 
&ClusterDomain, vpc_network_mode: Option, - environment_to_deploy: Option<&EnvironmentAction>, + environment_to_deploy: Option<&Environment>, ) -> String { init(); diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index c1262911..8a001db5 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -2,9 +2,7 @@ extern crate test_utilities; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{ - Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; @@ -64,8 +62,8 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -119,8 +117,8 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -199,8 +197,8 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let 
ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_for_deletion = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -317,12 +315,12 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let environment_to_redeploy = environment.clone(); let environment_check = environment.clone(); - let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let ea_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 59f489e3..325b6545 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; -use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -61,8 +61,8 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = 
EnvironmentAction::Environment(environment_for_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_for_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -111,7 +111,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { .as_str(), ); - let ea = EnvironmentAction::Environment(environment.clone()); + let ea = environment.clone(); let selector = format!("appId={}", environment.clone().applications[0].id); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); @@ -279,8 +279,8 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); @@ -353,8 +353,8 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -424,8 +424,8 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = 
EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -476,8 +476,8 @@ fn deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -547,8 +547,8 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -631,9 +631,9 @@ fn redeploy_same_app_with_ebs() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea2 = EnvironmentAction::Environment(environment_redeploy.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea2 = environment_redeploy.clone(); + let ea_delete = environment_delete.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -730,9 +730,9 @@ fn deploy_a_not_working_environment_and_after_working_environment() { 
environment_for_delete.action = Action::Delete; // environment actions - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let ea = environment.clone(); + let ea_not_working = environment_for_not_working.clone(); + let ea_delete = environment_for_delete.clone(); let ret = environment_for_not_working.deploy_environment( &ea_not_working, @@ -816,10 +816,10 @@ fn deploy_ok_fail_fail_ok_environment() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let ea_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let ea_delete = EnvironmentAction::Environment(delete_env.clone()); + let ea = environment.clone(); + let ea_not_working_1 = not_working_env_1.clone(); + let ea_not_working_2 = not_working_env_2.clone(); + let ea_delete = delete_env.clone(); // OK let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); @@ -891,8 +891,8 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(delete_env.clone()); + let ea = environment.clone(); + let ea_delete = delete_env.clone(); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); @@ -944,8 +944,8 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = 
EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index 9de34a04..c0775af1 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -2,7 +2,6 @@ use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::WithNatGateways; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; use std::str::FromStr; use test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; @@ -33,7 +32,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); engine_run_test(|| { cluster_test( diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index a975987f..9da21447 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -2,9 +2,7 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{ - Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use 
qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, @@ -64,8 +62,8 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -122,8 +120,8 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let ret = environment.deploy_environment(&env_action.clone(), logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -205,8 +203,8 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_deletion = environment_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -342,12 +340,12 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let environment_to_redeploy = environment.clone(); let 
environment_check = environment.clone(); - let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let env_action_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 09a45edf..d53888e4 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -60,8 +60,8 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -117,8 +117,8 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let mut 
environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); @@ -168,7 +168,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { .as_str(), ); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); @@ -283,8 +283,8 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -339,8 +339,8 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, 
logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -411,8 +411,8 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -497,9 +497,9 @@ fn digitalocean_doks_redeploy_same_app() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_redeploy = environment_redeploy.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -602,9 +602,9 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme environment_for_delete.action = Action::Delete; // environment actions - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_not_working = environment_for_not_working.clone(); + let env_action_delete = environment_for_delete.clone(); let result = 
environment_for_not_working.deploy_environment( &env_action_not_working, @@ -688,10 +688,10 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_not_working_1 = not_working_env_1.clone(); + let env_action_not_working_2 = not_working_env_2.clone(); + let env_action_delete = delete_env.clone(); // OK let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); @@ -772,8 +772,8 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_delete = delete_env.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); @@ -829,8 +829,8 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); 
assert!(matches!(result, TransactionResult::Ok)); diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index 4c14c90a..f4e3f0dd 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -1,7 +1,6 @@ use ::function_name::named; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -29,7 +28,7 @@ fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); engine_run_test(|| { cluster_test( diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index 587b56ab..dd2fee90 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,7 +1,6 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; -use qovery_engine::build_platform::Image; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use test_utilities::utilities::logger; diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index e2f1ef13..aeadd344 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -2,9 +2,7 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; 
-use qovery_engine::models::{ - Action, CloneForTest, Database, DatabaseKind, DatabaseMode, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, generate_password, get_pods, get_svc_name, init, is_pod_restarted_env, @@ -68,8 +66,8 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -128,8 +126,8 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -213,8 +211,8 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_deletion = environment_delete.clone(); let result = 
environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -347,12 +345,12 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { let environment_to_redeploy = environment.clone(); let environment_check = environment.clone(); - let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let env_action_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index c4df902f..56168def 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -61,8 +61,8 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = 
environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -121,8 +121,8 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); @@ -179,7 +179,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { .as_str(), ); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); @@ -296,8 +296,8 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -354,8 +354,8 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = 
EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -429,8 +429,8 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -490,7 +490,7 @@ fn deploy_a_working_environment_and_pause_it() { .as_str(), ); - let ea = EnvironmentAction::Environment(environment.clone()); + let ea = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); @@ -607,9 +607,9 @@ fn scaleway_kapsule_redeploy_same_app() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_redeploy = environment_redeploy.clone(); + let env_action_delete = environment_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -714,9 +714,9 @@ 
fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen environment_for_delete.action = Action::Delete; // environment actions - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_not_working = environment_for_not_working.clone(); + let env_action_delete = environment_for_delete.clone(); let result = environment_for_not_working.deploy_environment( &env_action_not_working, @@ -805,10 +805,10 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_not_working_1 = not_working_env_1.clone(); + let env_action_not_working_2 = not_working_env_2.clone(); + let env_action_delete = delete_env.clone(); // OK let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); @@ -892,8 +892,8 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_delete = delete_env.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, 
TransactionResult::UnrecoverableError(_, _))); @@ -949,8 +949,8 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index 641dd2df..899b4a19 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,7 +1,6 @@ use ::function_name::named; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -27,7 +26,7 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); engine_run_test(|| { cluster_test( From c42463e77d0750a4421605bc0bc9d180dd4106bf Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 21 Mar 2022 15:15:19 +0100 Subject: [PATCH 39/85] Fix does image exist for scaleway --- src/container_registry/scaleway_container_registry.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index c2df013d..0d920d3c 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -382,7 +382,11 @@ impl ContainerRegistry for ScalewayCR { name: image.name().clone(), tags: vec![image.tag.clone()], }; - self.context.docker.does_image_exist_remotely(&image).is_ok() + match self.context.docker.does_image_exist_remotely(&image) { + Ok(true) => true, + Ok(false) => false, + Err(_) => false, + } } fn logger(&self) -> &dyn Logger { From 96867446ae2b017721f2ab9ff9dce62476a31c78 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 21 Mar 2022 16:30:12 +0100 Subject: [PATCH 40/85] Fix build timeout --- src/cmd/command.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 1366eb65..660fd048 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -89,8 +89,14 @@ impl<'a> CommandKiller<'a> { let is_canceled = Self::from_cancelable(is_canceled); CommandKiller { should_abort: Box::new(move || { - (is_canceled.should_abort)()?; - (has_timeout.should_abort)()?; + if let Some(reason) = (has_timeout.should_abort)() { + return Some(reason); + } + + if let Some(reason) = (is_canceled.should_abort)() { + return Some(reason); + } + None }), } From d888c56af5a9cd5f19cfdfb8665c2458734124af Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 21 Mar 2022 22:13:18 +0100 Subject: [PATCH 41/85] fix stderr reading --- src/cmd/command.rs | 83 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 61 insertions(+), 22 deletions(-) diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 660fd048..3e886560 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -5,17 +5,11 @@ use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; use crate::cmd::command::CommandError::{ExecutionError, 
ExitStatusError, Killed, TimeoutError}; -use crate::cmd::command::CommandOutputType::{STDERR, STDOUT}; use itertools::Itertools; use std::time::{Duration, Instant}; use timeout_readwrite::TimeoutReader; -enum CommandOutputType { - STDOUT(Result), - STDERR(Result), -} - #[derive(thiserror::Error, Debug)] pub enum CommandError { #[error("Error while executing command")] @@ -173,38 +167,83 @@ impl QoveryCommand { .map_err(ExecutionError)?; // Read stdout/stderr until timeout is reached - let reader_timeout = std::time::Duration::from_secs(5); + let reader_timeout = std::time::Duration::from_secs(1); let stdout = cmd_handle.stdout.take().ok_or(ExecutionError(Error::new( ErrorKind::BrokenPipe, "Cannot get stdout for command", )))?; - let stdout_reader = BufReader::new(TimeoutReader::new(stdout, reader_timeout)) - .lines() - .map(STDOUT); + let mut stdout_reader = BufReader::new(TimeoutReader::new(stdout, reader_timeout)).lines(); let stderr = cmd_handle.stderr.take().ok_or(ExecutionError(Error::new( ErrorKind::BrokenPipe, "Cannot get stderr for command", )))?; - let stderr_reader = BufReader::new(TimeoutReader::new( + let mut stderr_reader = BufReader::new(TimeoutReader::new( stderr, std::time::Duration::from_secs(0), // don't block on stderr )) - .lines() - .map(STDERR); - - for line in stdout_reader.interleave(stderr_reader) { - match line { - STDOUT(Err(ref err)) | STDERR(Err(ref err)) if err.kind() == ErrorKind::TimedOut => {} - STDOUT(Ok(line)) => stdout_output(line), - STDERR(Ok(line)) => stderr_output(line), - STDOUT(Err(err)) => error!("Error on stdout of cmd {:?}: {:?}", self.command, err), - STDERR(Err(err)) => error!("Error on stderr of cmd {:?}: {:?}", self.command, err), - } + .lines(); + let mut should_exit_loop = false; + while !should_exit_loop { + // We should abort and kill the process if abort_notifier.should_abort().is_some() { break; } + + // Read on stdout first + while !should_exit_loop { + let line = if let Some(line) = stdout_reader.next() { + 
line + } else { + // Stdout has been closed + should_exit_loop = true; + break; + }; + + match line { + Err(ref err) if err.kind() == ErrorKind::TimedOut => break, + Ok(line) => stdout_output(line), + Err(err) => { + error!("Error on stdout of cmd {:?}: {:?}", self.command, err); + should_exit_loop = true; + break; + } + } + + // Should we abort and kill the process + if abort_notifier.should_abort().is_some() { + should_exit_loop = true; + break; + } + } + + // Read stderr now + while !should_exit_loop { + let line = if let Some(line) = stderr_reader.next() { + line + } else { + // Stderr has been closed + should_exit_loop = true; + break; + }; + + match line { + Err(ref err) if err.kind() == ErrorKind::TimedOut => break, + Ok(line) => stderr_output(line), + Err(err) => { + error!("Error on stderr of cmd {:?}: {:?}", self.command, err); + should_exit_loop = true; + break; + } + } + + // should we abort and kill the process + if abort_notifier.should_abort().is_some() { + should_exit_loop = true; + break; + } + } } // Wait for the process to exit before reaching the timeout From 40064851a629008627a2ff1ab1b7afff01d6ecde Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 22 Mar 2022 08:52:23 +0100 Subject: [PATCH 42/85] Cleanup --- src/cmd/command.rs | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 3e886560..5c92ac37 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -184,21 +184,23 @@ impl QoveryCommand { )) .lines(); - let mut should_exit_loop = false; - while !should_exit_loop { + let mut stdout_closed = false; + let mut stderr_closed = false; + while !stdout_closed || !stderr_closed { // We should abort and kill the process if abort_notifier.should_abort().is_some() { break; } // Read on stdout first - while !should_exit_loop { - let line = if let Some(line) = stdout_reader.next() { - line - } else { - // Stdout has been closed - 
should_exit_loop = true; - break; + while !stdout_closed { + let line = match stdout_reader.next() { + Some(line) => line, + None => { + // Stdout has been closed + stdout_closed = true; + break; + } }; match line { @@ -206,26 +208,28 @@ impl QoveryCommand { Ok(line) => stdout_output(line), Err(err) => { error!("Error on stdout of cmd {:?}: {:?}", self.command, err); - should_exit_loop = true; + stdout_closed = true; break; } } // Should we abort and kill the process if abort_notifier.should_abort().is_some() { - should_exit_loop = true; + stdout_closed = true; + stderr_closed = true; break; } } // Read stderr now - while !should_exit_loop { - let line = if let Some(line) = stderr_reader.next() { - line - } else { - // Stderr has been closed - should_exit_loop = true; - break; + while !stderr_closed { + let line = match stderr_reader.next() { + Some(line) => line, + None => { + // Stdout has been closed + stderr_closed = true; + break; + } }; match line { @@ -233,14 +237,15 @@ impl QoveryCommand { Ok(line) => stderr_output(line), Err(err) => { error!("Error on stderr of cmd {:?}: {:?}", self.command, err); - should_exit_loop = true; + stderr_closed = true; break; } } // should we abort and kill the process if abort_notifier.should_abort().is_some() { - should_exit_loop = true; + stdout_closed = true; + stderr_closed = true; break; } } From 97f5240d2783668af323847b1363ed3533762b44 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 22 Mar 2022 08:55:08 +0100 Subject: [PATCH 43/85] Clippy --- src/cmd/command.rs | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 5c92ac37..d7f67e85 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -25,19 +25,6 @@ pub enum CommandError { Killed(String), } -impl CommandError { - pub fn to_string(&self) -> String { - match self { - ExecutionError(err) => format!("Execution error: {}", err.to_string()), - ExitStatusError(exit_status) 
=> { - format!("Execution error: exit status {}", exit_status.to_string()) - } - TimeoutError(msg) => format!("Execution error: timeout, {}", msg.to_string()), - Killed(msg) => format!("Execution error: killed, {}", msg.to_string()), - } - } -} - #[derive(Debug, Clone)] pub enum AbortReason { Timeout(Duration), @@ -168,16 +155,16 @@ impl QoveryCommand { // Read stdout/stderr until timeout is reached let reader_timeout = std::time::Duration::from_secs(1); - let stdout = cmd_handle.stdout.take().ok_or(ExecutionError(Error::new( - ErrorKind::BrokenPipe, - "Cannot get stdout for command", - )))?; + let stdout = cmd_handle + .stdout + .take() + .ok_or_else(|| ExecutionError(Error::new(ErrorKind::BrokenPipe, "Cannot get stdout for command")))?; let mut stdout_reader = BufReader::new(TimeoutReader::new(stdout, reader_timeout)).lines(); - let stderr = cmd_handle.stderr.take().ok_or(ExecutionError(Error::new( - ErrorKind::BrokenPipe, - "Cannot get stderr for command", - )))?; + let stderr = cmd_handle + .stderr + .take() + .ok_or_else(|| ExecutionError(Error::new(ErrorKind::BrokenPipe, "Cannot get stderr for command")))?; let mut stderr_reader = BufReader::new(TimeoutReader::new( stderr, std::time::Duration::from_secs(0), // don't block on stderr @@ -304,7 +291,7 @@ impl QoveryCommand { // return the output of "binary_name" --version pub fn run_version_command_for(binary_name: &str) -> String { let mut output_from_cmd = String::new(); - let mut cmd = QoveryCommand::new(binary_name, &vec!["--version"], Default::default()); + let mut cmd = QoveryCommand::new(binary_name, &["--version"], Default::default()); let _ = cmd.exec_with_output(&mut |r_out| output_from_cmd.push_str(&r_out), &mut |r_err| { error!("Error executing {}: {}", binary_name, r_err) }); From 8060480848c12b48bff6a6196a2a7575f45ea7f3 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 22 Mar 2022 10:34:21 +0100 Subject: [PATCH 44/85] feat: introdce dedicated CR errors types (#653) --- src/cmd/docker.rs | 6 - 
src/container_registry/docr.rs | 296 +++++------------- src/container_registry/ecr.rs | 170 +++------- src/container_registry/errors.rs | 68 ++++ src/container_registry/mod.rs | 29 +- .../scaleway_container_registry.rs | 168 +++------- src/engine.rs | 37 ++- src/errors/mod.rs | 31 +- src/events/mod.rs | 12 +- src/models.rs | 2 +- src/transaction.rs | 12 +- test_utilities/src/aws.rs | 3 +- test_utilities/src/digitalocean.rs | 4 +- test_utilities/src/scaleway.rs | 11 +- tests/scaleway/scw_container_registry.rs | 5 - 15 files changed, 313 insertions(+), 541 deletions(-) create mode 100644 src/container_registry/errors.rs diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 6bcfe9f7..0d1c7965 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -1,6 +1,4 @@ use crate::cmd::command::{CommandError, CommandKiller, QoveryCommand}; -use crate::errors::EngineError; -use crate::events::EventDetails; use std::path::Path; use std::process::ExitStatus; use url::Url; @@ -474,10 +472,6 @@ where } } -pub fn to_engine_error(event_details: &EventDetails, error: DockerError) -> EngineError { - EngineError::new_docker_error(event_details.clone(), error) -} - // start a local registry to run this test // docker run --rm -ti -p 5000:5000 --name registry registry:2 #[cfg(feature = "test-with-docker")] diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index d4bf8e61..aec59d1b 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -2,14 +2,11 @@ extern crate digitalocean; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; -use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, EngineError, Kind}; -use crate::errors::CommandError; -use crate::events::{EngineEvent, EventDetails, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use 
crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::models::{Context, Listen, Listener, Listeners}; use crate::utilities; use url::Url; @@ -28,17 +25,10 @@ pub struct DOCR { pub id: String, pub registry_info: ContainerRegistryInfo, pub listeners: Listeners, - pub logger: Box, } impl DOCR { - pub fn new( - context: Context, - id: &str, - name: &str, - api_key: &str, - logger: Box, - ) -> Result { + pub fn new(context: Context, id: &str, name: &str, api_key: &str) -> Result { let registry_name = name.to_string(); let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); let _ = registry.set_username(&api_key); @@ -56,23 +46,17 @@ impl DOCR { api_key: api_key.into(), id: id.into(), listeners: vec![], - logger, registry_info, }; - let event_details = cr.get_event_details(); - if cr.context.docker.login(&cr.registry_info.endpoint).is_err() { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); + return Err(ContainerRegistryError::InvalidCredentials); } + Ok(cr) } - fn create_registry(&self, registry_name: &str) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - + fn create_registry(&self, registry_name: &str) -> Result<(), ContainerRegistryError> { // DOCR does not support upper cases let registry_name = registry_name.to_lowercase(); let headers = utilities::get_header_with_bearer(&self.api_key); @@ -96,54 +80,42 @@ impl DOCR { StatusCode::OK => Ok(()), StatusCode::CREATED => Ok(()), status => { - return Err(EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - self.name_with_id(), - registry_name.to_string(), - CommandError::new_from_safe_message(format!( + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( "Bad status code: `{}` returned by the DO registry API 
for creating DOCR `{}`.", status, registry_name.as_str(), - )), - )); + ), + }); } }, Err(e) => { - return Err(EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - self.name_with_id(), - registry_name.to_string(), - CommandError::new( + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( + "Failed to create DOCR repository `{}`, error: {}.", + registry_name.as_str(), e.to_string(), - Some(format!( - "Failed to create DOCR repository `{}`.", - registry_name.as_str(), - )), ), - )); + }); } } } Err(e) => { - return Err(EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - self.name_with_id(), - registry_name.to_string(), - CommandError::new( + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( + "Failed to create DOCR repository `{}`, error: {}.", + registry_name.as_str(), e.to_string(), - Some(format!( - "Failed to create DOCR repository `{}`.", - registry_name.as_str(), - )), ), - )); + }); } } } - pub fn delete_registry(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - + pub fn delete_registry(&self) -> Result<(), ContainerRegistryError> { let headers = utilities::get_header_with_bearer(&self.api_key); let res = reqwest::blocking::Client::new() .delete(CR_API_PATH) @@ -154,32 +126,25 @@ impl DOCR { Ok(out) => match out.status() { StatusCode::NO_CONTENT => Ok(()), status => { - return Err(EngineError::new_container_registry_delete_repository_error( - event_details.clone(), - "default".to_string(), // DO has only one repository - Some(CommandError::new_from_safe_message(format!( + return Err(ContainerRegistryError::CannotDeleteRegistry { + registry_name: "default".to_string(), + raw_error_message: format!( "Bad status code: `{}` returned by the DO registry API for deleting DOCR.", status, - ))), - )); + 
), + }); } }, Err(e) => { - return Err(EngineError::new_container_registry_delete_repository_error( - event_details.clone(), - "default".to_string(), // DO has only one repository - Some(CommandError::new( - e.to_string(), - Some("No response from the Digital Ocean API.".to_string()), - )), - )); + return Err(ContainerRegistryError::CannotDeleteRegistry { + registry_name: "default".to_string(), + raw_error_message: format!("No response from the Digital Ocean API, error: {}", e.to_string()), + }); } } } - pub fn exec_docr_login(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(); - + pub fn exec_docr_login(&self) -> Result<(), ContainerRegistryError> { let mut cmd = QoveryCommand::new( "doctl", &vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], @@ -188,19 +153,11 @@ impl DOCR { match cmd.exec() { Ok(_) => Ok(()), - Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )), + Err(_) => Err(ContainerRegistryError::InvalidCredentials), } } } -impl ToTransmitter for DOCR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } -} - impl ContainerRegistry for DOCR { fn context(&self) -> &Context { &self.context @@ -218,7 +175,7 @@ impl ContainerRegistry for DOCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ContainerRegistryError> { Ok(()) } @@ -226,23 +183,21 @@ impl ContainerRegistry for DOCR { &self.registry_info } - fn create_registry(&self) -> Result<(), EngineError> { + fn create_registry(&self) -> Result<(), ContainerRegistryError> { // Digital Ocean only allow one registry per account... 
- if let Err(_) = get_current_registry_name(self.api_key.as_str(), self.get_event_details(), self.logger()) { + if let Err(_) = get_current_registry_name(self.api_key.as_str()) { let _ = self.create_registry(self.name())?; } Ok(()) } - fn create_repository(&self, _repository_name: &str) -> Result<(), EngineError> { + fn create_repository(&self, _repository_name: &str) -> Result<(), ContainerRegistryError> { // Nothing to do, DO only allow one registry and create repository on the flight when image are pushed Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { - let event_details = self.get_event_details(); - let headers = utilities::get_header_with_bearer(self.api_key.as_str()); let url = format!( "https://api.digitalocean.com/v2/registry/{}/repositories/{}/tags", @@ -259,40 +214,10 @@ impl ContainerRegistry for DOCR { Ok(output) => match output.status() { StatusCode::OK => output.text(), _ => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_container_registry_image_doesnt_exist( - event_details.clone(), - image.name().to_string(), - Some(CommandError::new_from_safe_message(format!( - "While tyring to get all tags for image: `{}`, maybe this image not exist !", - image.name().to_string() - ))), - ), - None, - ), - ); - return false; } }, Err(_) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_container_registry_image_doesnt_exist( - event_details.clone(), - image.name().to_string(), - Some(CommandError::new_from_safe_message(format!( - "While trying to communicate with DigitalOcean API to retrieve all tags for image `{}`.", - image.name().to_string() - ))), - ), - None, - ), - ); - return false; } }; @@ -310,53 +235,12 @@ impl ContainerRegistry for DOCR { false } - Err(_) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_container_registry_image_doesnt_exist( - event_details.clone(), - image.name().to_string(), - Some(CommandError::new( - out.to_string(), 
- Some(format!( - "Unable to deserialize tags from DigitalOcean API for image {}", - &image.tag.to_string(), - )), - )), - ), - None, - ), - ); - - false - } + Err(_) => false, } } - _ => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_container_registry_image_doesnt_exist( - event_details.clone(), - image.name().to_string(), - Some(CommandError::new_from_safe_message(format!( - "While retrieving tags for image `{}` Unable to get output from DigitalOcean API.", - image.name().to_string() - ))), - ), - None, - ), - ); - - false - } + _ => false, } } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } } impl Listen for DOCR { @@ -369,7 +253,10 @@ impl Listen for DOCR { } } -pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: &str) -> Result<(), CommandError> { +pub fn subscribe_kube_cluster_to_container_registry( + api_key: &str, + cluster_uuid: &str, +) -> Result<(), ContainerRegistryError> { let headers = utilities::get_header_with_bearer(api_key); let cluster_ids = DoApiSubscribeToKubeCluster { cluster_uuids: vec![cluster_uuid.to_string()], @@ -387,28 +274,31 @@ pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: match res { Ok(output) => match output.status() { StatusCode::NO_CONTENT => Ok(()), - status => Err(CommandError::new_from_safe_message( - format!("Incorrect Status `{}` received from Digital Ocean when tyring to subscribe repository to cluster", status)), - ), + status => Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: cluster_uuid.to_string(), + raw_error_message: format!("Incorrect Status `{}` received from Digital Ocean when tyring to subscribe repository to cluster", status), + }), }, - Err(e) => Err(CommandError::new( - e.to_string(), - Some("Unable to call Digital Ocean when tyring to subscribe repository to cluster".to_string()), - )), + Err(e) => 
Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: cluster_uuid.to_string(), + raw_error_message: format!("Unable to call Digital Ocean when tyring to subscribe repository to cluster, error: {}", e.to_string()), + }), } } - Err(e) => Err(CommandError::new( - e.to_string(), - Some("Unable to Serialize digital ocean cluster uuids".to_string()), - )), + Err(e) => Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: cluster_uuid.to_string(), + raw_error_message: format!( + "Unable to Serialize digital ocean cluster uuids, error: {}", + e.to_string() + ), + }), }; } -pub fn get_current_registry_name( - api_key: &str, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result { +pub fn get_current_registry_name(api_key: &str) -> Result { let headers = utilities::get_header_with_bearer(api_key); let res = reqwest::blocking::Client::new() .get(CR_API_PATH) @@ -423,46 +313,30 @@ pub fn get_current_registry_name( match res_registry { Ok(registry) => Ok(registry.registry.name), - Err(err) => Err(EngineError::new_container_registry_repository_doesnt_exist( - event_details.clone(), - "default".to_string(), // DO has only one repository - Some(CommandError::new( - err.to_string(), - Some( - "An error occurred while deserializing JSON coming from Digital Ocean API.".to_string(), - ), - )), - )), + Err(err) => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Seems there is no registry set (DO has only one registry), error: {}.", + err.to_string() + ), + }), } } - status => { - Err(EngineError::new_container_registry_repository_doesnt_exist( - event_details.clone(), - "default".to_string(), // DO has only one repository - Some(CommandError::new( - format!("Status: {}", status), - Some( - "Incorrect Status received from Digital Ocean when tyring to get container registry." 
- .to_string(), - ), - )), - )) - } + status => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Incorrect status `{}` received from Digital Ocean when tyring to get container registry.", + status + ), + }), }, - Err(e) => { - let err = EngineError::new_container_registry_repository_doesnt_exist( - event_details.clone(), - "default".to_string(), // DO has only one repository - Some(CommandError::new( - e.to_string(), - Some("Unable to call Digital Ocean when tyring to fetch the container registry name.".to_string()), - )), - ); - - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - Err(err) - } + Err(e) => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Unable to call Digital Ocean when tyring to fetch the container registry name, error: {}.", + e.to_string(), + ), + }), }; } diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 63d9e307..26e3aa6e 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -1,4 +1,3 @@ -use std::borrow::Borrow; use std::str::FromStr; use rusoto_core::{Client, HttpClient, Region, RusotoError}; @@ -10,11 +9,8 @@ use rusoto_ecr::{ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient}; use crate::build_platform::Image; -use crate::cmd::docker::to_engine_error; +use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; use crate::models::{Context, Listen, Listener, Listeners}; use crate::runtime::block_on; use retry::delay::Fixed; @@ -32,7 +28,6 @@ pub struct ECR { region: Region, registry_info: Option, listeners: Listeners, - logger: Box, } impl ECR { @@ -43,8 +38,7 @@ 
impl ECR { access_key_id: &str, secret_access_key: &str, region: &str, - logger: Box, - ) -> Result { + ) -> Result { let mut cr = ECR { context, id: id.to_string(), @@ -54,7 +48,6 @@ impl ECR { region: Region::from_str(region).unwrap(), registry_info: None, listeners: vec![], - logger, }; let credentials = cr.get_credentials()?; @@ -66,7 +59,7 @@ impl ECR { .context .docker .login(®istry_url) - .map_err(|err| to_engine_error(&cr.get_event_details(), err))?; + .map_err(|_err| ContainerRegistryError::InvalidCredentials)?; let registry_info = ContainerRegistryInfo { endpoint: registry_url, @@ -132,16 +125,7 @@ impl ECR { } } - fn create_repository(&self, repository_name: &str) -> Result { - let event_details = self.get_event_details(); - self.logger().log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Creating ECR repository {}", &repository_name)), - ), - ); - + fn create_repository(&self, repository_name: &str) -> Result { let mut repo_creation_counter = 0; let container_registry_request = DescribeRepositoriesRequest { repository_names: Some(vec![repository_name.to_string()]), @@ -159,92 +143,44 @@ impl ECR { self.ecr_client() .describe_repositories(container_registry_request.clone()), ) { - Ok(x) => { - self.logger().log( - LogLevel::Debug, - EngineEvent::Debug( - event_details.clone(), - EventMessage::new_from_safe(format!("Created {:?} repository", x)), - ), - ); - OperationResult::Ok(()) - } + Ok(_x) => OperationResult::Ok(()), Err(e) => { match e { RusotoError::Service(s) => match s { DescribeRepositoriesError::RepositoryNotFound(_) => { - if repo_creation_counter != 0 { - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Repository {} was not found, {}x retrying...", - &repository_name, &repo_creation_counter - )), - ), - ); - } repo_creation_counter += 1; } - _ => self.logger().log( - LogLevel::Warning, - 
EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - "Error while trying to create repository.".to_string(), - Some(format!("{:?}", s)), - ), - ), - ), + _ => {} }, - _ => self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - "Error while trying to create repository.".to_string(), - Some(format!("{:?}", e)), - ), - ), - ), + _ => {} } - // TODO: This behavior is weird, returning an ok message saying repository has been created in an error ... - let msg = match block_on(self.ecr_client().create_repository(crr.clone())) { - Ok(_) => format!("repository {} created", &repository_name), - Err(err) => format!("{:?}", err), - }; + if let Err(err) = block_on(self.ecr_client().create_repository(crr.clone())) { + return OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + })); + } - OperationResult::Retry(Err(EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - repository_name.to_string(), - self.name_with_id(), - CommandError::new(msg.to_string(), Some("Can't create ECR repository".to_string())), - ))) + OperationResult::Err(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: "unknwon error".to_string(), + })) } } }); match repo_created { - Ok(_) => self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "repository {} created after {} attempt(s)", - &repository_name, repo_creation_counter, - )), - ), - ), + Ok(_) => {} Err(Operation { error, .. 
}) => return error, Err(retry::Error::Internal(e)) => { - return Err(EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - repository_name.to_string(), - self.name_with_id(), - CommandError::new_from_safe_message(e), - )) + return Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: e.to_string(), + }) } }; @@ -278,38 +214,26 @@ impl ECR { }; match block_on(self.ecr_client().put_lifecycle_policy(plp)) { - Err(err) => Err( - EngineError::new_container_registry_repository_set_lifecycle_policy_error( - event_details.clone(), - repository_name.to_string(), - CommandError::new_from_safe_message(err.to_string()), - ), - ), + Err(err) => Err(ContainerRegistryError::CannotSetRepositoryLifecyclePolicy { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + }), _ => Ok(self.get_repository(repository_name).expect("cannot get repository")), } } - fn get_or_create_repository(&self, repository_name: &str) -> Result { - let event_details = self.get_event_details(); - + fn get_or_create_repository(&self, repository_name: &str) -> Result { // check if the repository already exists let repository = self.get_repository(repository_name); if repository.is_some() { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("ECR repository {} already exists", repository_name)), - ), - ); return Ok(repository.unwrap()); } self.create_repository(repository_name) } - fn get_credentials(&self) -> Result { - let event_details = self.get_event_details(); + fn get_credentials(&self) -> Result { let r = block_on( self.ecr_client() .get_authorization_token(GetAuthorizationTokenRequest::default()), @@ -333,17 +257,11 @@ impl ECR { ) } None => { - return Err(EngineError::new_container_registry_get_credentials_error( - 
event_details.clone(), - self.name_with_id(), - )); + return Err(ContainerRegistryError::CannotGetCredentials); } }, _ => { - return Err(EngineError::new_container_registry_get_credentials_error( - event_details.clone(), - self.name_with_id(), - )); + return Err(ContainerRegistryError::CannotGetCredentials); } }; @@ -351,12 +269,6 @@ impl ECR { } } -impl ToTransmitter for ECR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } -} - impl ContainerRegistry for ECR { fn context(&self) -> &Context { &self.context @@ -374,15 +286,13 @@ impl ContainerRegistry for ECR { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ContainerRegistryError> { let client = StsClient::new_with_client(self.client(), Region::default()); let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); match s { Ok(_) => Ok(()), - Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( - self.get_event_details(), - )), + Err(_) => Err(ContainerRegistryError::InvalidCredentials), } } @@ -391,12 +301,12 @@ impl ContainerRegistry for ECR { self.registry_info.as_ref().unwrap() } - fn create_registry(&self) -> Result<(), EngineError> { + fn create_registry(&self) -> Result<(), ContainerRegistryError> { // Nothing to do, ECR require to create only repository Ok(()) } - fn create_repository(&self, name: &str) -> Result<(), EngineError> { + fn create_repository(&self, name: &str) -> Result<(), ContainerRegistryError> { let _ = self.get_or_create_repository(name)?; Ok(()) } @@ -404,10 +314,6 @@ impl ContainerRegistry for ECR { fn does_image_exists(&self, image: &Image) -> bool { self.get_image(image).is_some() } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } } impl Listen for ECR { diff --git a/src/container_registry/errors.rs b/src/container_registry/errors.rs new file mode 100644 index 00000000..830e9f32 --- 
/dev/null +++ b/src/container_registry/errors.rs @@ -0,0 +1,68 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum ContainerRegistryError { + #[error("Invalid credentials error.")] + InvalidCredentials, + #[error("Cannot get credentials error.")] + CannotGetCredentials, + #[error("Cannot create registry error for `{registry_name:?}`: {raw_error_message:?}.")] + CannotCreateRegistry { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot delete registry error for `{registry_name:?}`: {raw_error_message:?}.")] + CannotDeleteRegistry { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot delete image `{image_name:?}` error from repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotDeleteImage { + registry_name: String, + repository_name: String, + image_name: String, + raw_error_message: String, + }, + #[error("Image `{image_name:?}` doesn't exist in repository `{repository_name:?}` in registry `{registry_name:?}` error.")] + ImageDoesntExistInRegistry { + registry_name: String, + repository_name: String, + image_name: String, + }, + #[error("Repository `{repository_name:?}` doesn't exist in registry `{registry_name:?}` error.")] + RepositoryDoesntExistInRegistry { + registry_name: String, + repository_name: String, + }, + #[error("Registry `{registry_name:?}` doesn't exist, error: {raw_error_message:?}.")] + RegistryDoesntExist { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot link registry `{registry_name:?}` to cluster `{cluster_id:?}`: {raw_error_message:?}.")] + CannotLinkRegistryToCluster { + registry_name: String, + cluster_id: String, + raw_error_message: String, + }, + #[error("Cannot create repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotCreateRepository { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, + #[error( + "Cannot 
delete repository `{repository_name:?}` from registry `{registry_name:?}`: {raw_error_message:?}." + )] + CannotDeleteRepository { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, + #[error("Cannot set lifecycle policy for repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotSetRepositoryLifecyclePolicy { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, +} diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index dafec506..d5d07fef 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -2,16 +2,15 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::build_platform::Image; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; -use crate::logger::Logger; -use crate::models::{Context, Listen, QoveryIdentifier}; +use crate::container_registry::errors::ContainerRegistryError; +use crate::models::{Context, Listen}; pub mod docr; pub mod ecr; +pub mod errors; pub mod scaleway_container_registry; -pub trait ContainerRegistry: Listen + ToTransmitter { +pub trait ContainerRegistry: Listen { fn context(&self) -> &Context; fn kind(&self) -> Kind; fn id(&self) -> &str; @@ -19,37 +18,23 @@ pub trait ContainerRegistry: Listen + ToTransmitter { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), EngineError>; + fn is_valid(&self) -> Result<(), ContainerRegistryError>; // Get info for this registry, url endpoint with login/password, image name convention, ... 
fn registry_info(&self) -> &ContainerRegistryInfo; // Some provider require specific action in order to allow container registry // For now it is only digital ocean, that require 2 steps to have registries - fn create_registry(&self) -> Result<(), EngineError>; + fn create_registry(&self) -> Result<(), ContainerRegistryError>; // Call to create a specific repository in the registry // i.e: docker.io/erebe or docker.io/qovery // All providers requires action for that // The convention for us is that we create one per application - fn create_repository(&self, repository_name: &str) -> Result<(), EngineError>; + fn create_repository(&self, repository_name: &str) -> Result<(), ContainerRegistryError>; // Check on the registry if a specific image already exist fn does_image_exists(&self, image: &Image) -> bool; - - fn logger(&self) -> &dyn Logger; - fn get_event_details(&self) -> EventDetails { - let context = self.context(); - EventDetails::new( - None, - QoveryIdentifier::from(context.organization_id().to_string()), - QoveryIdentifier::from(context.cluster_id().to_string()), - QoveryIdentifier::from(context.execution_id().to_string()), - None, - Stage::Environment(EnvironmentStep::Build), - self.to_transmitter(), - ) - } } pub struct ContainerRegistryInfo { diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 0d920d3c..148000e1 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -1,16 +1,13 @@ extern crate scaleway_api_rs; use crate::cloud_provider::scaleway::application::ScwZone; -use std::borrow::Borrow; use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; use crate::cmd::docker; +use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::errors::{CommandError, 
EngineError}; -use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::models::{Context, Listen, Listener, Listeners}; use crate::runtime::block_on; use url::Url; @@ -23,7 +20,6 @@ pub struct ScalewayCR { zone: ScwZone, registry_info: ContainerRegistryInfo, listeners: Listeners, - logger: Box, } impl ScalewayCR { @@ -34,18 +30,7 @@ impl ScalewayCR { secret_token: &str, default_project_id: &str, zone: ScwZone, - logger: Box, - ) -> Result { - let event_details = EventDetails::new( - None, - QoveryIdentifier::from(context.organization_id().to_string()), - QoveryIdentifier::from(context.cluster_id().to_string()), - QoveryIdentifier::from(context.execution_id().to_string()), - None, - Stage::General(GeneralStep::ValidateSystemRequirements), - Transmitter::ContainerRegistry(id.to_string(), name.to_string()), - ); - + ) -> Result { // Be sure we are logged on the registry let login = "nologin".to_string(); let secret_token = secret_token.to_string(); @@ -55,9 +40,7 @@ impl ScalewayCR { let _ = registry.set_password(Some(&secret_token)); if context.docker.login(®istry).is_err() { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); + return Err(ContainerRegistryError::InvalidCredentials); } let registry_info = ContainerRegistryInfo { @@ -80,7 +63,6 @@ impl ScalewayCR { zone, registry_info, listeners: Vec::new(), - logger, }; Ok(cr) @@ -112,17 +94,7 @@ impl ScalewayCR { Some(namespace_name), )) { Ok(res) => res.namespaces, - Err(e) => { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new( - "Error while interacting with Scaleway API (list_namespaces).".to_string(), - Some(format!("error: {}, image: {}", e, namespace_name)), - ), - ), - ); + Err(_e) => { return None; } }; @@ -155,17 +127,7 @@ 
impl ScalewayCR { Some(self.default_project_id.as_str()), )) { Ok(res) => res.images, - Err(e) => { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new( - "Error while interacting with Scaleway API (list_namespaces).".to_string(), - Some(format!("error: {}, image: {}", e, &image.name())), - ), - ), - ); + Err(_e) => { return None; } }; @@ -183,21 +145,18 @@ impl ScalewayCR { None } - pub fn delete_image(&self, image: &Image) -> Result { - let event_details = self.get_event_details(); - + pub fn delete_image( + &self, + image: &Image, + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#delete-67dbf7 let image_to_delete = self.get_image(image); if image_to_delete.is_none() { - let err = EngineError::new_container_registry_image_doesnt_exist( - event_details.clone(), - image.name().to_string(), - None, - ); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - return Err(err); + return Err(ContainerRegistryError::ImageDoesntExistInRegistry { + registry_name: self.name.to_string(), + repository_name: image.registry_name.to_string(), + image_name: image.name.to_string(), + }); } let image_to_delete = image_to_delete.unwrap(); @@ -208,26 +167,19 @@ impl ScalewayCR { image_to_delete.id.unwrap().as_str(), )) { Ok(res) => Ok(res), - Err(e) => { - let err = EngineError::new_container_registry_delete_image_error( - event_details.clone(), - image.name().to_string(), - Some(CommandError::new(e.to_string(), None)), - ); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - Err(err) - } + Err(e) => Err(ContainerRegistryError::CannotDeleteImage { + registry_name: self.name.to_string(), + repository_name: image.registry_name.to_string(), + image_name: image.name.to_string(), + raw_error_message: e.to_string(), + }), } } pub fn create_registry_namespace( &self, namespace_name: &str, - ) -> Result { - let event_details = self.get_event_details(); 
- + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#post-7a8fcc match block_on(scaleway_api_rs::apis::namespaces_api::create_namespace( &self.get_configuration(), @@ -241,40 +193,25 @@ impl ScalewayCR { }, )) { Ok(res) => Ok(res), - Err(e) => { - let error = EngineError::new_container_registry_namespace_creation_error( - event_details.clone(), - namespace_name.to_string(), - self.name_with_id(), - CommandError::new(e.to_string(), Some("Can't create SCW repository".to_string())), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - Err(error) - } + Err(e) => Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + raw_error_message: e.to_string(), + }), } } pub fn delete_registry_namespace( &self, namespace_name: &str, - ) -> Result { + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#delete-c1ac9b - let event_details = self.get_event_details(); let registry_to_delete = self.get_registry_namespace(namespace_name); if registry_to_delete.is_none() { - let error = EngineError::new_container_registry_repository_doesnt_exist( - event_details.clone(), - namespace_name.to_string(), - None, - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + return Err(ContainerRegistryError::RepositoryDoesntExistInRegistry { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + }); } let registry_to_delete = registry_to_delete.unwrap(); @@ -286,16 +223,11 @@ impl ScalewayCR { )) { Ok(res) => Ok(res), Err(e) => { - let error = EngineError::new_container_registry_delete_repository_error( - event_details.clone(), - namespace_name.to_string(), - Some(CommandError::new(e.to_string(), None)), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + return 
Err(ContainerRegistryError::CannotDeleteRepository { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + raw_error_message: e.to_string(), + }); } } } @@ -303,20 +235,12 @@ impl ScalewayCR { pub fn get_or_create_registry_namespace( &self, namespace_name: &str, - ) -> Result { + ) -> Result { info!("Get/Create repository for {}", namespace_name); // check if the repository already exists - let event_details = self.get_event_details(); let registry_namespace = self.get_registry_namespace(namespace_name); if let Some(namespace) = registry_namespace { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("SCW repository {} already exists", namespace_name)), - ), - ); return Ok(namespace); } @@ -335,12 +259,6 @@ impl ScalewayCR { } } -impl ToTransmitter for ScalewayCR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } -} - impl ContainerRegistry for ScalewayCR { fn context(&self) -> &Context { &self.context @@ -358,7 +276,7 @@ impl ContainerRegistry for ScalewayCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ContainerRegistryError> { Ok(()) } @@ -366,12 +284,12 @@ impl ContainerRegistry for ScalewayCR { &self.registry_info } - fn create_registry(&self) -> Result<(), EngineError> { + fn create_registry(&self) -> Result<(), ContainerRegistryError> { // Nothing to do, scaleway managed container registry per repository (aka `namespace` by the scw naming convention) Ok(()) } - fn create_repository(&self, name: &str) -> Result<(), EngineError> { + fn create_repository(&self, name: &str) -> Result<(), ContainerRegistryError> { let _ = self.get_or_create_registry_namespace(name)?; Ok(()) } @@ -388,10 +306,6 @@ impl ContainerRegistry for ScalewayCR { Err(_) => false, } } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } } 
impl Listen for ScalewayCR { diff --git a/src/engine.rs b/src/engine.rs index 2284d672..770c6a29 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -1,14 +1,30 @@ use std::borrow::Borrow; use std::sync::Arc; +use thiserror::Error; use crate::build_platform::BuildPlatform; use crate::cloud_provider::kubernetes::Kubernetes; use crate::cloud_provider::CloudProvider; +use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::ContainerRegistry; use crate::dns_provider::DnsProvider; use crate::errors::EngineError; use crate::models::Context; +#[derive(Error, Debug, PartialEq)] +pub enum EngineConfigError { + #[error("Build platform is not valid error: {0}")] + BuildPlatformNotValid(EngineError), + #[error("Container registry is not valid error: {0}")] + ContainerRegistryNotValid(ContainerRegistryError), + #[error("Cloud provider is not valid error: {0}")] + CloudProviderNotValid(EngineError), + #[error("DNS provider is not valid error: {0}")] + DnsProviderNotValid(EngineError), + #[error("Kubernetes is not valid error: {0}")] + KubernetesNotValid(EngineError), +} + pub struct EngineConfig { context: Context, build_platform: Box, @@ -61,11 +77,22 @@ impl EngineConfig { (*self.dns_provider).borrow() } - pub fn is_valid(&self) -> Result<(), EngineError> { - self.build_platform.is_valid()?; - self.container_registry.is_valid()?; - self.cloud_provider.is_valid()?; - self.dns_provider.is_valid()?; + pub fn is_valid(&self) -> Result<(), EngineConfigError> { + if let Err(e) = self.build_platform.is_valid() { + return Err(EngineConfigError::BuildPlatformNotValid(e)); + } + + if let Err(e) = self.container_registry.is_valid() { + return Err(EngineConfigError::ContainerRegistryNotValid(e)); + } + + if let Err(e) = self.cloud_provider.is_valid() { + return Err(EngineConfigError::CloudProviderNotValid(e)); + } + + if let Err(e) = self.dns_provider.is_valid() { + return Err(EngineConfigError::DnsProviderNotValid(e)); + } Ok(()) } diff --git 
a/src/errors/mod.rs b/src/errors/mod.rs index c13758b2..3cc36601 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -6,6 +6,7 @@ use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd; use crate::cmd::docker::DockerError; use crate::cmd::helm::HelmError; +use crate::container_registry::errors::ContainerRegistryError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; use crate::models::QoveryIdentifier; @@ -114,6 +115,12 @@ impl From for CommandError { } } +impl From for CommandError { + fn from(container_registry_error: ContainerRegistryError) -> Self { + CommandError::new_from_safe_message(container_registry_error.to_string()) + } +} + #[derive(Clone, Debug, PartialEq)] /// Tag: unique identifier for an error. pub enum Tag { @@ -303,7 +310,7 @@ pub enum Tag { ObjectStorageCannotActivateBucketVersioning, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] /// EngineError: represents an engine error. Engine will always returns such errors carrying context infos easing monitoring and debugging. 
pub struct EngineError { /// tag: error unique identifier @@ -2439,7 +2446,7 @@ impl EngineError { event_details: EventDetails, repository_name: String, registry_name: String, - raw_error: CommandError, + raw_error: ContainerRegistryError, ) -> EngineError { let message = format!( "Error, trying to create registry `{}` in `{}`.", @@ -2451,7 +2458,7 @@ impl EngineError { Tag::ContainerRegistryRepositoryCreationError, message.to_string(), message.to_string(), - Some(raw_error), + Some(raw_error.into()), None, None, ) @@ -2467,7 +2474,7 @@ impl EngineError { pub fn new_container_registry_repository_set_lifecycle_policy_error( event_details: EventDetails, repository_name: String, - raw_error: CommandError, + raw_error: ContainerRegistryError, ) -> EngineError { let message = format!( "Error, trying to set lifecycle policy repository `{}`.", @@ -2479,7 +2486,7 @@ impl EngineError { Tag::ContainerRegistryRepositorySetLifecycleError, message.to_string(), message.to_string(), - Some(raw_error), + Some(raw_error.into()), None, None, ) @@ -2521,7 +2528,7 @@ impl EngineError { pub fn new_container_registry_delete_image_error( event_details: EventDetails, image_name: String, - raw_error: Option, + raw_error: ContainerRegistryError, ) -> EngineError { let message = format!("Failed to delete image `{}`.", image_name,); @@ -2530,7 +2537,7 @@ impl EngineError { Tag::ContainerRegistryDeleteImageError, message.to_string(), message.to_string(), - raw_error, + Some(raw_error.into()), None, None, ) @@ -2545,7 +2552,7 @@ impl EngineError { pub fn new_container_registry_image_doesnt_exist( event_details: EventDetails, image_name: String, - raw_error: Option, + raw_error: ContainerRegistryError, ) -> EngineError { let message = format!("Image `{}` doesn't exists.", image_name,); @@ -2554,7 +2561,7 @@ impl EngineError { Tag::ContainerRegistryImageDoesntExist, message.to_string(), message.to_string(), - raw_error, + Some(raw_error.into()), None, None, ) @@ -2819,3 +2826,9 @@ impl 
EngineError { ) } } + +impl Display for EngineError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(format!("{:?}", self).as_str()) + } +} diff --git a/src/events/mod.rs b/src/events/mod.rs index 659b3d37..2cf35408 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -154,7 +154,7 @@ impl Display for EventMessage { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// Stage: represents an engine event stage, can be General, Infrastructure or Environment. pub enum Stage { /// GeneralStep: general stage in the engine, usually used across all stages. @@ -190,7 +190,7 @@ impl Display for Stage { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// GeneralStep: represents an engine general step usually shared across all engine stages pub enum GeneralStep { /// ValidateSystemRequirements: validating system requirements @@ -218,7 +218,7 @@ impl Display for GeneralStep { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// InfrastructureStep: represents an engine infrastructure step. pub enum InfrastructureStep { /// LoadConfiguration: first step in infrastructure, aiming to load all configuration (from Terraform, etc). @@ -255,7 +255,7 @@ impl Display for InfrastructureStep { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// EnvironmentStep: represents an engine environment step. pub enum EnvironmentStep { /// LoadConfiguration: first step in environment, aiming to load all configuration (from Terraform, etc). @@ -309,7 +309,7 @@ type TransmitterName = String; /// TransmitterType: represents a transmitter type. type TransmitterType = String; // TODO(benjaminch): makes it a real enum / type -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// Transmitter: represents the event's source caller (transmitter). pub enum Transmitter { /// BuildPlatform: platform aiming to build applications images. 
@@ -358,7 +358,7 @@ impl Display for Transmitter { /// Region: represents event's cloud provider region. type Region = String; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// EventDetails: represents an event details, carrying all useful data such as Qovery identifiers, transmitter, stage etc. pub struct EventDetails { /// provider_kind: cloud provider name. an be set to None if not linked to any provider kind. diff --git a/src/models.rs b/src/models.rs index eff991cc..0e830377 100644 --- a/src/models.rs +++ b/src/models.rs @@ -27,7 +27,7 @@ use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::utilities::get_image_tag; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { raw_long_id: String, short: String, diff --git a/src/transaction.rs b/src/transaction.rs index e2fb863b..b646f388 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -2,7 +2,7 @@ use std::thread; use crate::cloud_provider::kubernetes::Kubernetes; use crate::cloud_provider::service::Service; -use crate::engine::EngineConfig; +use crate::engine::{EngineConfig, EngineConfigError}; use crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EventMessage}; use crate::logger::{LogLevel, Logger}; @@ -26,9 +26,11 @@ impl<'a> Transaction<'a> { logger: Box, is_transaction_aborted: Box bool>, on_step_change: Box, - ) -> Result { + ) -> Result { let _ = engine.is_valid()?; - let _ = engine.kubernetes().is_valid()?; + if let Err(e) = engine.kubernetes().is_valid() { + return Err(EngineConfigError::KubernetesNotValid(e)); + } let mut tx = Transaction::<'a> { engine, @@ -118,7 +120,7 @@ impl<'a> Transaction<'a> { // Do setup of registry and be sure we are login to the registry let cr_registry = self.engine.container_registry(); - let _ = cr_registry.create_registry()?; + let _ = cr_registry.create_registry(); let registry = self.engine.container_registry().registry_info(); for app in 
apps_to_build.into_iter() { @@ -130,7 +132,7 @@ impl<'a> Transaction<'a> { } // Be sure that our repository exist before trying to pull/push images from it - let _ = self.engine.container_registry().create_repository(&app.name)?; + let _ = self.engine.container_registry().create_repository(&app.name); // Ok now everything is setup, we can try to build the app let _ = self diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index d6540cdb..41f59b17 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -20,7 +20,7 @@ use tracing::error; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; -use crate::utilities::{build_platform_local_docker, logger, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3; pub const AWS_TEST_REGION: AwsRegion = AwsRegion::EuWest3; @@ -49,7 +49,6 @@ pub fn container_registry_ecr(context: &Context) -> ECR { secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(), secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(), secrets.AWS_DEFAULT_REGION.unwrap().as_str(), - logger(), ) .unwrap() } diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 03a8920d..481d76ec 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -12,7 +12,7 @@ use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; -use crate::utilities::{build_platform_local_docker, logger, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; @@ -38,7 +38,6 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { 
DOCR_ID, DOCR_ID, secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), - logger(), ) .unwrap() } @@ -175,7 +174,6 @@ pub fn clean_environments( .DIGITAL_OCEAN_TOKEN .as_ref() .expect("DIGITAL_OCEAN_TOKEN is not set in secrets"), - logger(), ); // FIXME: re-enable it, or let pleco do its job ? diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 8799229b..86e4f27d 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -11,16 +11,16 @@ use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrateg use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; -use crate::utilities::{build_platform_local_docker, generate_id, logger, FuncTestsSecrets}; +use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Scw; +use qovery_engine::container_registry::errors::ContainerRegistryError; use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; use tracing::error; @@ -59,7 +59,6 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { scw_secret_key.as_str(), scw_default_project_id.as_str(), SCW_TEST_ZONE, - logger(), ) .unwrap() } @@ -226,7 +225,7 @@ pub fn clean_environments( environments: Vec, secrets: FuncTestsSecrets, zone: ScwZone, -) -> Result<(), EngineError> { +) -> Result<(), ContainerRegistryError> { let secret_token = secrets.SCALEWAY_SECRET_KEY.unwrap(); let project_id = secrets.SCALEWAY_DEFAULT_PROJECT_ID.unwrap(); @@ -237,9 +236,7 @@ pub fn clean_environments( secret_token.as_str(), project_id.as_str(), zone, - logger(), - ) 
- .unwrap(); + )?; // delete images created in registry let registry_url = container_registry_client.registry_info(); diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index dd2fee90..9b38fb54 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -3,7 +3,6 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; -use test_utilities::utilities::logger; use tracing::debug; use uuid::Uuid; @@ -45,7 +44,6 @@ fn test_get_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - logger(), ) .unwrap(); @@ -95,7 +93,6 @@ fn test_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - logger(), ) .unwrap(); @@ -139,7 +136,6 @@ fn test_delete_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - logger(), ) .unwrap(); @@ -177,7 +173,6 @@ fn test_get_or_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - logger(), ) .unwrap(); From fcf4d3a26df54f11ec875ea6e2f1010edf052797 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 23 Mar 2022 08:12:39 +0100 Subject: [PATCH 45/85] Refacto to having double compute of image hash (#654) * Refacto to having double compute of image hash * Pre-compute registry name --- src/build_platform/local_docker.rs | 16 +- src/build_platform/mod.rs | 19 +- src/cloud_provider/aws/application.rs | 66 ++- src/cloud_provider/aws/databases/mongodb.rs | 30 +- src/cloud_provider/aws/databases/mysql.rs | 28 +- .../aws/databases/postgresql.rs | 30 +- src/cloud_provider/aws/databases/redis.rs | 28 +- src/cloud_provider/aws/router.rs | 34 +- .../digitalocean/application.rs | 66 ++- 
.../digitalocean/databases/mongodb.rs | 38 +- .../digitalocean/databases/mysql.rs | 28 +- .../digitalocean/databases/postgresql.rs | 30 +- .../digitalocean/databases/redis.rs | 36 +- src/cloud_provider/digitalocean/router.rs | 34 +- src/cloud_provider/environment.rs | 56 +- src/cloud_provider/kubernetes.rs | 28 +- src/cloud_provider/scaleway/application.rs | 75 +-- .../scaleway/databases/mongodb.rs | 30 +- .../scaleway/databases/mysql.rs | 36 +- .../scaleway/databases/postgresql.rs | 36 +- .../scaleway/databases/redis.rs | 36 +- src/cloud_provider/scaleway/router.rs | 34 +- src/cloud_provider/service.rs | 11 +- src/container_registry/docr.rs | 3 + src/container_registry/ecr.rs | 1 + src/container_registry/mod.rs | 9 + .../scaleway_container_registry.rs | 1 + src/errors/mod.rs | 18 + src/models.rs | 539 ++++++++---------- src/transaction.rs | 181 +++--- test_utilities/src/common.rs | 105 ++-- test_utilities/src/digitalocean.rs | 4 +- test_utilities/src/scaleway.rs | 4 +- test_utilities/src/utilities.rs | 12 +- 34 files changed, 897 insertions(+), 805 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 04e4e8a0..22d2d924 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,7 +6,7 @@ use std::{env, fs}; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; -use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentials, Kind}; +use crate::build_platform::{docker, Build, BuildPlatform, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; @@ -82,13 +82,13 @@ impl LocalDocker { fn build_image_with_docker( &self, - build: Build, + build: &Build, dockerfile_complete_path: &str, into_dir_docker_style: &str, env_var_args: Vec, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result { + ) -> Result<(), EngineError> { let 
image_to_build = ContainerImage { registry: build.image.registry_url.clone(), name: build.image.name(), @@ -169,7 +169,7 @@ impl LocalDocker { ); match exit_status { - Ok(_) => Ok(BuildResult { build }), + Ok(_) => Ok(()), Err(DockerError::Aborted(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), Err(err) => Err(EngineError::new_docker_cannot_build_container_image( self.get_event_details(), @@ -181,13 +181,13 @@ impl LocalDocker { fn build_image_with_buildpacks( &self, - build: Build, + build: &Build, into_dir_docker_style: &str, env_var_args: Vec, use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result { + ) -> Result<(), EngineError> { let name_with_tag = build.image.full_image_name_with_tag(); let name_with_latest_tag = format!("{}:latest", build.image.full_image_name()); @@ -318,7 +318,7 @@ impl LocalDocker { } match exit_status { - Ok(_) => Ok(BuildResult { build }), + Ok(_) => Ok(()), Err(Killed(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), Err(err) => { let error = EngineError::new_buildpack_cannot_build_container_image( @@ -386,7 +386,7 @@ impl BuildPlatform for LocalDocker { Ok(()) } - fn build(&self, build: Build, is_task_canceled: &dyn Fn() -> bool) -> Result { + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), EngineError> { let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); let app_id = build.image.application_id.clone(); diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 49cdf6a1..50998660 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -20,7 +20,7 @@ pub trait BuildPlatform: ToTransmitter + Listen { format!("{} ({})", self.name(), self.id()) } fn is_valid(&self) -> Result<(), EngineError>; - fn build(&self, build: Build, is_task_canceled: &dyn Fn() -> bool) -> Result; + fn build(&self, build: &mut 
Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), EngineError>; fn logger(&self) -> Box; fn get_event_details(&self) -> EventDetails { let context = self.context(); @@ -81,12 +81,13 @@ pub struct Image { pub name: String, pub tag: String, pub commit_id: String, - // registry name where the image has been pushed: Optional + // registry name where the image has been pushed pub registry_name: String, // registry docker json config: Optional pub registry_docker_json_config: Option, // complete registry URL where the image has been pushed pub registry_url: Url, + pub repository_name: String, } impl Image { @@ -94,6 +95,9 @@ impl Image { self.registry_url.host_str().unwrap() } + pub fn repository_name(&self) -> &str { + &self.repository_name + } pub fn full_image_name_with_tag(&self) -> String { format!( "{}/{}:{}", @@ -122,6 +126,7 @@ impl Default for Image { registry_name: "".to_string(), registry_docker_json_config: None, registry_url: Url::parse("https://default.com").unwrap(), + repository_name: "".to_string(), } } } @@ -136,16 +141,6 @@ impl Display for Image { } } -pub struct BuildResult { - pub build: Build, -} - -impl BuildResult { - pub fn new(build: Build) -> Self { - BuildResult { build } - } -} - #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs index 9fe7bb24..47ff01dd 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/cloud_provider/aws/application.rs @@ -1,14 +1,14 @@ use tera::Context as TeraContext; -use crate::build_platform::Image; +use crate::build_platform::Build; use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; use crate::cloud_provider::models::{ EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, }; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, 
deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm, - Pause, Service, ServiceType, StatelessService, + scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, + ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -20,7 +20,7 @@ use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; use ::function_name::named; -pub struct Application { +pub struct ApplicationAws { context: Context, id: String, action: Action, @@ -32,14 +32,14 @@ pub struct Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, } -impl Application { +impl ApplicationAws { pub fn new( context: Context, id: &str, @@ -52,13 +52,13 @@ impl Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, ) -> Self { - Application { + ApplicationAws { context, id: id.to_string(), action, @@ -70,7 +70,7 @@ impl Application { min_instances, max_instances, start_timeout_in_seconds, - image, + build, storage, environment_variables, listeners, @@ -91,17 +91,7 @@ impl Application { } } -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { +impl Helm for ApplicationAws { fn helm_selector(&self) -> Option { self.selector() } @@ -123,15 +113,29 @@ impl Helm for Application { } } -impl StatelessService for Application {} +impl StatelessService for ApplicationAws { + fn 
as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Application { +impl ToTransmitter for ApplicationAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Application(self.id.to_string(), self.name.to_string()) } } -impl Service for Application { +impl Application for ApplicationAws { + fn get_build(&self) -> &Build { + &self.build + } + + fn get_build_mut(&mut self) -> &mut Build { + &mut self.build + } +} + +impl Service for ApplicationAws { fn context(&self) -> &Context { &self.context } @@ -153,7 +157,7 @@ impl Service for Application { } fn version(&self) -> String { - self.image.commit_id.clone() + self.build.image.commit_id.clone() } fn action(&self) -> &Action { @@ -198,10 +202,10 @@ impl Service for Application { fn tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let mut context = default_tera_context(self, target.kubernetes, target.environment); - let commit_id = self.image().commit_id.as_str(); + let commit_id = self.build.image.commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); let environment_variables = self .environment_variables @@ -215,7 +219,7 @@ impl Service for Application { context.insert("environment_variables", &environment_variables); context.insert("ports", &self.ports); context.insert("is_registry_secret", &true); - context.insert("registry_secret", self.image.registry_host()); + context.insert("registry_secret", self.build.image.registry_host()); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -284,7 +288,7 @@ impl Service for Application { } } -impl Create for Application { +impl Create for ApplicationAws { #[named] fn on_create(&self, 
target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -323,7 +327,7 @@ impl Create for Application { } } -impl Pause for Application { +impl Pause for ApplicationAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -366,7 +370,7 @@ impl Pause for Application { } } -impl Delete for Application { +impl Delete for ApplicationAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -406,7 +410,7 @@ impl Delete for Application { } } -impl Listen for Application { +impl Listen for ApplicationAws { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 9ff964b9..58e732c7 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -21,7 +21,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDbAws { context: Context, id: String, action: Action, @@ -37,7 +37,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDbAws { pub fn new( context: Context, id: &str, @@ -53,7 +53,7 @@ impl MongoDB { listeners: Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDbAws { context, action, id: id.to_string(), @@ -92,13 +92,17 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDbAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl Service for MongoDB { +impl Service for MongoDbAws { fn context(&self) -> &Context { 
&self.context } @@ -243,9 +247,9 @@ impl Service for MongoDB { } } -impl Database for MongoDB {} +impl Database for MongoDbAws {} -impl ToTransmitter for MongoDB { +impl ToTransmitter for MongoDbAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -255,7 +259,7 @@ impl ToTransmitter for MongoDB { } } -impl Helm for MongoDB { +impl Helm for MongoDbAws { fn helm_selector(&self) -> Option { self.selector() } @@ -277,7 +281,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDbAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -287,7 +291,7 @@ impl Terraform for MongoDB { } } -impl Create for MongoDB { +impl Create for MongoDbAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -330,7 +334,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl Pause for MongoDbAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -368,7 +372,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDbAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -405,7 +409,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDbAws { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs index 0375fe36..f1569d0d 100644 --- a/src/cloud_provider/aws/databases/mysql.rs +++ b/src/cloud_provider/aws/databases/mysql.rs @@ -22,7 +22,7 @@ use crate::models::DatabaseMode::MANAGED; use 
crate::models::{Context, DatabaseKind, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MySQL { +pub struct MySQLAws { context: Context, id: String, action: Action, @@ -38,7 +38,7 @@ pub struct MySQL { logger: Box, } -impl MySQL { +impl MySQLAws { pub fn new( context: Context, id: &str, @@ -93,13 +93,17 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -109,7 +113,7 @@ impl ToTransmitter for MySQL { } } -impl Service for MySQL { +impl Service for MySQLAws { fn context(&self) -> &Context { &self.context } @@ -262,9 +266,9 @@ impl Service for MySQL { } } -impl Database for MySQL {} +impl Database for MySQLAws {} -impl Helm for MySQL { +impl Helm for MySQLAws { fn helm_selector(&self) -> Option { self.selector() } @@ -286,7 +290,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -296,7 +300,7 @@ impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -340,7 +344,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -377,7 +381,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLAws { #[named] fn on_delete(&self, target: 
&DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -414,7 +418,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLAws { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs index 13db226f..754daf69 100644 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ b/src/cloud_provider/aws/databases/postgresql.rs @@ -22,7 +22,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct PostgreSQL { +pub struct PostgreSQLAws { context: Context, id: String, action: Action, @@ -38,7 +38,7 @@ pub struct PostgreSQL { logger: Box, } -impl PostgreSQL { +impl PostgreSQLAws { pub fn new( context: Context, id: &str, @@ -54,7 +54,7 @@ impl PostgreSQL { listeners: Listeners, logger: Box, ) -> Self { - PostgreSQL { + PostgreSQLAws { context, action, id: id.to_string(), @@ -93,13 +93,17 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgreSQLAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgreSQLAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -109,7 +113,7 @@ impl ToTransmitter for PostgreSQL { } } -impl Service for PostgreSQL { +impl Service for PostgreSQLAws { fn context(&self) -> &Context { &self.context } @@ -250,9 +254,9 @@ impl Service for PostgreSQL { } } -impl Database for PostgreSQL {} +impl Database for PostgreSQLAws {} -impl Helm for PostgreSQL { +impl Helm for PostgreSQLAws { fn helm_selector(&self) -> Option { self.selector() } @@ -274,7 +278,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { 
+impl Terraform for PostgreSQLAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -284,7 +288,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgreSQLAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -328,7 +332,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgreSQLAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -366,7 +370,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgreSQLAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -404,7 +408,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgreSQLAws { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs index dbbfe976..4bc1acd8 100644 --- a/src/cloud_provider/aws/databases/redis.rs +++ b/src/cloud_provider/aws/databases/redis.rs @@ -19,7 +19,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Redis { +pub struct RedisAws { context: Context, id: String, action: Action, @@ -35,7 +35,7 @@ pub struct Redis { logger: Box, } -impl Redis { +impl RedisAws { pub fn new( context: Context, id: &str, @@ -90,13 +90,17 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { 
self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -106,7 +110,7 @@ impl ToTransmitter for Redis { } } -impl Service for Redis { +impl Service for RedisAws { fn context(&self) -> &Context { &self.context } @@ -266,9 +270,9 @@ impl Service for Redis { } } -impl Database for Redis {} +impl Database for RedisAws {} -impl Helm for Redis { +impl Helm for RedisAws { fn helm_selector(&self) -> Option { self.selector() } @@ -290,7 +294,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -300,7 +304,7 @@ impl Terraform for Redis { } } -impl Create for Redis { +impl Create for RedisAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -343,7 +347,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -381,7 +385,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -418,7 +422,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisAws { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index bef43303..eec33d08 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -4,7 +4,7 @@ use 
crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -16,7 +16,7 @@ use crate::logger::{LogLevel, Logger}; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Router { +pub struct RouterAws { context: Context, id: String, name: String, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterAws { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterAws { context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterAws { fn context(&self) -> &Context { &self.context } @@ -133,8 +133,8 @@ impl Service for Router { let mut context = default_tera_context(self, kubernetes, environment); let applications = environment - .stateless_services - .iter() + .stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -250,7 +250,7 @@ impl Service for Router { } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterAws { fn domains(&self) -> Vec<&str> { let mut _domains = vec![self.default_domain.as_str()]; @@ -266,7 +266,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterAws { fn helm_selector(&self) -> Option { self.selector() } @@ -288,7 +288,7 @@ impl Helm for 
Router { } } -impl Listen for Router { +impl Listen for RouterAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -298,15 +298,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterAws { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Router { +impl ToTransmitter for RouterAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -422,7 +426,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterAws { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -456,7 +460,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index fb24f5b2..13763ab7 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -1,14 +1,14 @@ use tera::Context as TeraContext; -use crate::build_platform::Image; +use crate::build_platform::Build; use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; use crate::cloud_provider::models::{ EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, }; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, 
deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Create, Delete, Helm, Pause, Service, ServiceType, - StatelessService, + scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, + ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -22,7 +22,7 @@ use ::function_name::named; use std::fmt; use std::str::FromStr; -pub struct Application { +pub struct ApplicationDo { context: Context, id: String, action: Action, @@ -34,14 +34,14 @@ pub struct Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, } -impl Application { +impl ApplicationDo { pub fn new( context: Context, id: &str, @@ -54,13 +54,13 @@ impl Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, ) -> Self { - Application { + ApplicationDo { context, id: id.to_string(), action, @@ -72,7 +72,7 @@ impl Application { min_instances, max_instances, start_timeout_in_seconds, - image, + build, storage, environment_variables, listeners, @@ -93,17 +93,7 @@ impl Application { } } -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { +impl Helm for ApplicationDo { fn helm_selector(&self) -> Option { self.selector() } @@ -125,15 +115,29 @@ impl Helm for Application { } } -impl StatelessService for Application {} +impl StatelessService for ApplicationDo { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Application { +impl Application for 
ApplicationDo { + fn get_build(&self) -> &Build { + &self.build + } + + fn get_build_mut(&mut self) -> &mut Build { + &mut self.build + } +} + +impl ToTransmitter for ApplicationDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Application(self.id().to_string(), self.name().to_string()) } } -impl Service for Application { +impl Service for ApplicationDo { fn context(&self) -> &Context { &self.context } @@ -155,7 +159,7 @@ impl Service for Application { } fn version(&self) -> String { - self.image.commit_id.clone() + self.build.image.commit_id.clone() } fn action(&self) -> &Action { @@ -202,10 +206,10 @@ impl Service for Application { let kubernetes = target.kubernetes; let environment = target.environment; let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.image.commit_id.as_str(); + let commit_id = self.build.image.commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -243,7 +247,7 @@ impl Service for Application { // This is specific to digital ocean as it is them that create the registry secret // we don't have the hand on it - context.insert("registry_secret", &self.image.registry_name); + context.insert("registry_secret", &self.build.image.registry_name); let storage = self .storage @@ -287,7 +291,7 @@ impl Service for Application { } } -impl Create for Application { +impl Create for ApplicationDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -327,7 +331,7 @@ impl Create for Application { } } -impl Pause for Application { +impl Pause for ApplicationDo { #[named] fn on_pause(&self, target: 
&DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -370,7 +374,7 @@ impl Pause for Application { } } -impl Delete for Application { +impl Delete for ApplicationDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -410,7 +414,7 @@ impl Delete for Application { } } -impl Listen for Application { +impl Listen for ApplicationDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index b05ca2a7..177fb0eb 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDo { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl MongoDB { listeners: Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDo { context, action, id: id.to_string(), @@ -83,13 +83,17 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MongoDB { +impl ToTransmitter for MongoDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for MongoDB { } } -impl Service for MongoDB { +impl Service for MongoDo { fn context(&self) -> &Context { &self.context } @@ -222,18 +226,18 @@ impl Service for MongoDB { 
Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for MongoDB {} +impl Database for MongoDo {} -impl Helm for MongoDB { +impl Helm for MongoDo { fn helm_selector(&self) -> Option { self.selector() } @@ -255,7 +259,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -265,7 +269,7 @@ impl Terraform for MongoDB { } } -impl Create for MongoDB { +impl Create for MongoDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -303,7 +307,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl Pause for MongoDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -341,7 +345,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -379,7 +383,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index 9ab89351..ccd201bb 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, 
Listener, Listeners}; use ::function_name::named; -pub struct MySQL { +pub struct MySQLDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MySQL { logger: Box, } -impl MySQL { +impl MySQLDo { pub fn new( context: Context, id: &str, @@ -83,13 +83,17 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for MySQL { } } -impl Service for MySQL { +impl Service for MySQLDo { fn context(&self) -> &Context { &self.context } @@ -231,9 +235,9 @@ impl Service for MySQL { } } -impl Database for MySQL {} +impl Database for MySQLDo {} -impl Helm for MySQL { +impl Helm for MySQLDo { fn helm_selector(&self) -> Option { self.selector() } @@ -255,7 +259,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -265,7 +269,7 @@ impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -306,7 +310,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -344,7 +348,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let 
event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -383,7 +387,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index d539ee5d..9b7dbd1e 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct PostgreSQL { +pub struct PostgresDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct PostgreSQL { logger: Box, } -impl PostgreSQL { +impl PostgresDo { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl PostgreSQL { listeners: Listeners, logger: Box, ) -> Self { - PostgreSQL { + PostgresDo { context, action, id: id.to_string(), @@ -83,13 +83,17 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgresDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgresDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for PostgreSQL { } } -impl Service for PostgreSQL { +impl Service for PostgresDo { fn context(&self) -> &Context { &self.context } @@ -233,9 +237,9 @@ impl Service for PostgreSQL { } } -impl Database for PostgreSQL {} +impl Database for PostgresDo {} -impl Helm for PostgreSQL { +impl Helm for PostgresDo { fn helm_selector(&self) -> Option { self.selector() } @@ -257,7 +261,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { +impl Terraform for PostgresDo { fn 
terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -267,7 +271,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgresDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -307,7 +311,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgresDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -345,7 +349,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgresDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -385,7 +389,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgresDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 98284ca0..8ed8d5b2 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Redis { +pub struct RedisDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct Redis { logger: Box, } -impl Redis { +impl RedisDo { pub fn new( context: Context, id: &str, @@ -83,13 +83,17 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { 
self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for Redis { } } -impl Service for Redis { +impl Service for RedisDo { fn context(&self) -> &Context { &self.context } @@ -221,18 +225,18 @@ impl Service for Redis { Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for Redis {} +impl Database for RedisDo {} -impl Helm for Redis { +impl Helm for RedisDo { fn helm_selector(&self) -> Option { self.selector() } @@ -254,7 +258,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -264,7 +268,7 @@ impl Terraform for Redis { } } -impl Create for Redis { +impl Create for RedisDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -304,7 +308,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -341,7 +345,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -380,7 +384,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisDo { fn listeners(&self) -> &Listeners { &self.listeners 
} diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index ceaa1671..87635ad6 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -4,7 +4,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -16,7 +16,7 @@ use crate::logger::{LogLevel, Logger}; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Router { +pub struct RouterDo { context: Context, id: String, action: Action, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterDo { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterDo { context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterDo { fn context(&self) -> &Context { &self.context } @@ -134,8 +134,8 @@ impl Service for Router { context.insert("doks_cluster_id", kubernetes.id()); let applications = environment - .stateless_services - .iter() + .stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -263,7 +263,7 @@ impl Service for Router { } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterDo { fn domains(&self) -> Vec<&str> { let mut _domains = 
vec![self.default_domain.as_str()]; @@ -279,7 +279,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterDo { fn helm_selector(&self) -> Option { self.selector() } @@ -304,7 +304,7 @@ impl Helm for Router { } } -impl Listen for Router { +impl Listen for RouterDo { fn listeners(&self) -> &Listeners { &self.listeners } @@ -314,15 +314,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterDo { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Router { +impl ToTransmitter for RouterDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -438,7 +442,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterDo { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -472,7 +476,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 11a1f96a..624532a7 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::service::{Action, StatefulService, StatelessService}; +use crate::cloud_provider::service::{Action, Application, Database, Router, StatefulService, StatelessService}; use crate::unit_conversion::cpu_string_to_float; pub 
struct Environment { @@ -7,8 +7,10 @@ pub struct Environment { pub project_id: String, pub owner_id: String, pub organization_id: String, - pub stateless_services: Vec>, - pub stateful_services: Vec>, + pub action: Action, + pub applications: Vec>, + pub routers: Vec>, + pub databases: Vec>, } impl Environment { @@ -17,8 +19,10 @@ impl Environment { project_id: &str, owner_id: &str, organization_id: &str, - stateless_services: Vec>, - stateful_services: Vec>, + action: Action, + applications: Vec>, + routers: Vec>, + databases: Vec>, ) -> Self { Environment { namespace: format!("{}-{}", project_id, id), @@ -26,11 +30,41 @@ impl Environment { project_id: project_id.to_string(), owner_id: owner_id.to_string(), organization_id: organization_id.to_string(), - stateless_services, - stateful_services, + action, + applications, + routers, + databases, } } + pub fn stateless_services(&self) -> Vec<&dyn StatelessService> { + let mut stateless_services: Vec<&dyn StatelessService> = + Vec::with_capacity(self.applications.len() + self.routers.len()); + stateless_services.extend_from_slice( + self.applications + .iter() + .map(|x| x.as_stateless_service()) + .collect::>() + .as_slice(), + ); + stateless_services.extend_from_slice( + self.routers + .iter() + .map(|x| x.as_stateless_service()) + .collect::>() + .as_slice(), + ); + + stateless_services + } + + pub fn stateful_services(&self) -> Vec<&dyn StatefulService> { + self.databases + .iter() + .map(|x| x.as_stateful_service()) + .collect::>() + } + pub fn namespace(&self) -> &str { self.namespace.as_str() } @@ -41,10 +75,10 @@ impl Environment { pub fn required_resources(&self) -> EnvironmentResources { let mut total_cpu_for_stateless_services: f32 = 0.0; let mut total_ram_in_mib_for_stateless_services: u32 = 0; - let mut required_pods = self.stateless_services.len() as u32; + let mut required_pods = self.stateless_services().len() as u32; - for service in &self.stateless_services { - match *service.action() { + for service 
in self.stateless_services() { + match service.action() { Action::Create | Action::Nothing => { total_cpu_for_stateless_services += cpu_string_to_float(&service.total_cpus()); total_ram_in_mib_for_stateless_services += &service.total_ram_in_mib(); @@ -56,7 +90,7 @@ impl Environment { let mut total_cpu_for_stateful_services: f32 = 0.0; let mut total_ram_in_mib_for_stateful_services: u32 = 0; - for service in &self.stateful_services { + for service in self.stateful_services() { if service.is_managed_service() { // If it is a managed service, we don't care of its resources as it is not managed by us continue; diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 85c6e422..e5eb10b9 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -434,7 +434,7 @@ pub fn deploy_environment( }; // create all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.exec_action(&stateful_deployment_target), kubernetes, @@ -471,7 +471,7 @@ pub fn deploy_environment( }; // create all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.exec_action(&stateless_deployment_target), kubernetes, @@ -489,7 +489,7 @@ pub fn deploy_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.exec_check_action(), kubernetes, @@ -506,7 +506,7 @@ pub fn deploy_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.exec_check_action(), kubernetes, @@ -547,7 +547,7 @@ pub fn deploy_environment_error( }; // clean up all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_create_error(&stateful_deployment_target), kubernetes, @@ -571,7 +571,7 @@ pub fn deploy_environment_error( }; // clean up all stateless services (router, application...) - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_create_error(&stateless_deployment_target), kubernetes, @@ -609,7 +609,7 @@ pub fn pause_environment( }; // create all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_pause(&stateless_deployment_target), kubernetes, @@ -627,7 +627,7 @@ pub fn pause_environment( thread::sleep(std::time::Duration::from_millis(100)); // create all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_pause(&stateful_deployment_target), kubernetes, @@ -644,7 +644,7 @@ pub fn pause_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_pause_check(), kubernetes, @@ -662,7 +662,7 @@ pub fn pause_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_pause_check(), kubernetes, @@ -700,7 +700,7 @@ pub fn delete_environment( }; // delete all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_delete(&stateful_deployment_target), kubernetes, @@ -718,7 +718,7 @@ pub fn delete_environment( thread::sleep(std::time::Duration::from_millis(100)); // delete all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_delete(&stateful_deployment_target), kubernetes, @@ -735,7 +735,7 @@ pub fn delete_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_delete_check(), kubernetes, @@ -753,7 +753,7 @@ pub fn delete_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_delete_check(), kubernetes, diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index 81cd52e8..c2549ec0 100644 --- a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -3,15 +3,15 @@ use std::str::FromStr; use tera::Context as TeraContext; -use crate::build_platform::Image; +use crate::build_platform::Build; use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; use crate::cloud_provider::models::{ EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, }; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - 
scale_down_application, send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm, - Pause, Service, ServiceType, StatelessService, + scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, + ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -23,7 +23,7 @@ use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; use ::function_name::named; -pub struct Application { +pub struct ApplicationScw { context: Context, id: String, action: Action, @@ -35,14 +35,14 @@ pub struct Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, } -impl Application { +impl ApplicationScw { pub fn new( context: Context, id: &str, @@ -55,13 +55,13 @@ impl Application { min_instances: u32, max_instances: u32, start_timeout_in_seconds: u32, - image: Image, + build: Build, storage: Vec>, environment_variables: Vec, listeners: Listeners, logger: Box, ) -> Self { - Application { + ApplicationScw { context, id: id.to_string(), action, @@ -73,7 +73,7 @@ impl Application { min_instances, max_instances, start_timeout_in_seconds, - image, + build, storage, environment_variables, listeners, @@ -94,17 +94,7 @@ impl Application { } } -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { +impl Helm for ApplicationScw { fn helm_selector(&self) -> Option { self.selector() } @@ -126,15 +116,29 @@ impl Helm for Application { } } -impl StatelessService for Application {} +impl StatelessService for ApplicationScw { + fn as_stateless_service(&self) -> &dyn StatelessService { + 
self + } +} -impl ToTransmitter for Application { +impl Application for ApplicationScw { + fn get_build(&self) -> &Build { + &self.build + } + + fn get_build_mut(&mut self) -> &mut Build { + &mut self.build + } +} + +impl ToTransmitter for ApplicationScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Application(self.id().to_string(), self.name().to_string()) } } -impl Service for Application { +impl Service for ApplicationScw { fn context(&self) -> &Context { &self.context } @@ -156,7 +160,7 @@ impl Service for Application { } fn version(&self) -> String { - self.image.commit_id.clone() + self.build.image.commit_id.clone() } fn action(&self) -> &Action { @@ -203,10 +207,10 @@ impl Service for Application { let kubernetes = target.kubernetes; let environment = target.environment; let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.image().commit_id.as_str(); + let commit_id = self.build.image.commit_id.as_str(); context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.image.full_image_name_with_tag()); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); let environment_variables = self .environment_variables @@ -280,7 +284,8 @@ impl Service for Application { // container registry credentials context.insert( "container_registry_docker_json_config", - self.image + self.build + .image .clone() .registry_docker_json_config .unwrap_or("".to_string()) @@ -290,16 +295,16 @@ impl Service for Application { Ok(context) } - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("appId={}", self.id)) + } } -impl Create for Application { +impl Create for ApplicationScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -339,7 +344,7 @@ impl Create for Application { } } -impl Pause for Application { +impl Pause for ApplicationScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -382,7 +387,7 @@ impl Pause for Application { } } -impl Delete for Application { +impl Delete for ApplicationScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -422,7 +427,7 @@ impl Delete for Application { } } -impl Listen for Application { +impl Listen for ApplicationScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index dbef1c9b..933731cb 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDbScw { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDbScw { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl MongoDB { listeners: Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDbScw { context, action, id: id.to_string(), @@ -83,13 +83,17 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDbScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MongoDB { +impl ToTransmitter for MongoDbScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( 
self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for MongoDB { } } -impl Service for MongoDB { +impl Service for MongoDbScw { fn context(&self) -> &Context { &self.context } @@ -233,9 +237,9 @@ impl Service for MongoDB { } } -impl Database for MongoDB {} +impl Database for MongoDbScw {} -impl Helm for MongoDB { +impl Helm for MongoDbScw { fn helm_selector(&self) -> Option { self.selector() } @@ -257,7 +261,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDbScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -267,7 +271,7 @@ impl Terraform for MongoDB { } } -impl Create for MongoDB { +impl Create for MongoDbScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -310,7 +314,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl Pause for MongoDbScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -348,7 +352,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDbScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -385,7 +389,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDbScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index 4f6472c3..f543c5a3 100644 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -19,7 +19,7 @@ use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; 
use std::collections::HashMap; -pub struct MySQL { +pub struct MySQLScw { context: Context, id: String, action: Action, @@ -35,7 +35,7 @@ pub struct MySQL { logger: Box, } -impl MySQL { +impl MySQLScw { pub fn new( context: Context, id: &str, @@ -110,13 +110,17 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -126,7 +130,7 @@ impl ToTransmitter for MySQL { } } -impl Service for MySQL { +impl Service for MySQLScw { fn context(&self) -> &Context { &self.context } @@ -255,18 +259,18 @@ impl Service for MySQL { Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for MySQL {} +impl Database for MySQLScw {} -impl Helm for MySQL { +impl Helm for MySQLScw { fn helm_selector(&self) -> Option { self.selector() } @@ -288,7 +292,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -298,7 +302,7 @@ impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -342,7 +346,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -380,7 +384,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -417,7 +421,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index c5b30b5e..376611d1 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -19,7 +19,7 @@ use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; use std::collections::HashMap; -pub struct PostgreSQL { +pub struct PostgresScw { context: Context, id: String, action: Action, @@ -35,7 +35,7 @@ pub struct PostgreSQL { logger: Box, } -impl PostgreSQL { +impl PostgresScw { pub fn new( context: Context, id: &str, @@ -119,13 +119,17 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgresScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgresScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -135,7 +139,7 @@ impl ToTransmitter for PostgreSQL { } } -impl Service for PostgreSQL { +impl Service for PostgresScw { fn context(&self) -> &Context { &self.context } @@ -264,18 +268,18 @@ impl Service for PostgreSQL { Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", 
self.sanitized_name())) + } } -impl Database for PostgreSQL {} +impl Database for PostgresScw {} -impl Helm for PostgreSQL { +impl Helm for PostgresScw { fn helm_selector(&self) -> Option { self.selector() } @@ -297,7 +301,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { +impl Terraform for PostgresScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -307,7 +311,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgresScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -351,7 +355,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgresScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -389,7 +393,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgresScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -426,7 +430,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgresScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 4643cc7b..3abc9ad4 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -16,7 +16,7 @@ use crate::models::DatabaseMode::MANAGED; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Redis { +pub struct RedisScw { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct 
Redis { logger: Box, } -impl Redis { +impl RedisScw { pub fn new( context: Context, id: &str, @@ -83,13 +83,17 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Database( self.id().to_string(), @@ -99,7 +103,7 @@ impl ToTransmitter for Redis { } } -impl Service for Redis { +impl Service for RedisScw { fn context(&self) -> &Context { &self.context } @@ -222,18 +226,18 @@ impl Service for Redis { Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for Redis {} +impl Database for RedisScw {} -impl Helm for Redis { +impl Helm for RedisScw { fn helm_selector(&self) -> Option { self.selector() } @@ -255,7 +259,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -265,7 +269,7 @@ impl Terraform for Redis { } } -impl Create for Redis { +impl Create for RedisScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -308,7 +312,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -345,7 +349,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisScw { #[named] 
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -382,7 +386,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 93a5d99c..4844331d 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -4,7 +4,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -16,7 +16,7 @@ use crate::logger::{LogLevel, Logger}; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Router { +pub struct RouterScw { context: Context, id: String, action: Action, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterScw { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterScw { context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterScw { fn context(&self) -> &Context { &self.context } @@ -133,8 +133,8 @@ impl Service for Router { let mut context = default_tera_context(self, kubernetes, environment); let applications = environment - .stateless_services - .iter() + 
.stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -203,7 +203,7 @@ impl Service for Router { } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterScw { fn domains(&self) -> Vec<&str> { let mut _domains = vec![self.default_domain.as_str()]; @@ -219,7 +219,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterScw { fn helm_selector(&self) -> Option { self.selector() } @@ -241,7 +241,7 @@ impl Helm for Router { } } -impl Listen for Router { +impl Listen for RouterScw { fn listeners(&self) -> &Listeners { &self.listeners } @@ -251,15 +251,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterScw { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Router { +impl ToTransmitter for RouterScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -376,7 +380,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterScw { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -410,7 +414,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 182700ce..1948bda1 100644 --- a/src/cloud_provider/service.rs +++ 
b/src/cloud_provider/service.rs @@ -5,9 +5,9 @@ use std::sync::mpsc::TryRecvError; use std::thread; use std::time::Duration; +use crate::build_platform::Build; use tera::Context as TeraContext; -use crate::build_platform::Image; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::kubernetes::Kubernetes; @@ -119,6 +119,7 @@ pub trait Service: ToTransmitter { } pub trait StatelessService: Service + Create + Pause + Delete { + fn as_stateless_service(&self) -> &dyn StatelessService; fn exec_action(&self, deployment_target: &DeploymentTarget) -> Result<(), EngineError> { match self.action() { crate::cloud_provider::service::Action::Create => self.on_create(deployment_target), @@ -139,6 +140,7 @@ pub trait StatelessService: Service + Create + Pause + Delete { } pub trait StatefulService: Service + Create + Pause + Delete { + fn as_stateful_service(&self) -> &dyn StatefulService; fn exec_action(&self, deployment_target: &DeploymentTarget) -> Result<(), EngineError> { match self.action() { crate::cloud_provider::service::Action::Create => self.on_create(deployment_target), @@ -159,10 +161,9 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } - pub trait Application: StatelessService { - fn image(&self) -> &Image; - fn set_image(&mut self, image: Image); + fn get_build(&self) -> &Build; + fn get_build_mut(&mut self) -> &mut Build; } pub trait Router: StatelessService + Listen + Helm { @@ -997,7 +998,7 @@ pub enum CheckAction { pub fn check_kubernetes_service_error( result: Result<(), EngineError>, kubernetes: &dyn Kubernetes, - service: &Box, + service: &T, event_details: EventDetails, logger: &dyn Logger, deployment_target: &DeploymentTarget, diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index aec59d1b..461593fe 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -30,14 +30,17 @@ pub 
struct DOCR { impl DOCR { pub fn new(context: Context, id: &str, name: &str, api_key: &str) -> Result { let registry_name = name.to_string(); + let registry_name2 = name.to_string(); let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); let _ = registry.set_username(&api_key); let _ = registry.set_password(Some(&api_key)); + let registry_info = ContainerRegistryInfo { endpoint: registry, registry_name: name.to_string(), registry_docker_json_config: None, get_image_name: Box::new(move |img_name| format!("{}/{}", registry_name, img_name)), + get_repository_name: Box::new(move |_| registry_name2.to_string()), }; let cr = DOCR { diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 26e3aa6e..4f82b3ce 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -66,6 +66,7 @@ impl ECR { registry_name: cr.name.to_string(), registry_docker_json_config: None, get_image_name: Box::new(|img_name| img_name.to_string()), + get_repository_name: Box::new(|imag_name| imag_name.to_string()), }; cr.registry_info = Some(registry_info); diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index d5d07fef..67b2be00 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -3,6 +3,8 @@ use url::Url; use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; +use crate::errors::EngineError; +use crate::events::EventDetails; use crate::models::{Context, Listen}; pub mod docr; @@ -37,6 +39,10 @@ pub trait ContainerRegistry: Listen { fn does_image_exists(&self, image: &Image) -> bool; } +pub fn to_engine_error(event_details: EventDetails, err: ContainerRegistryError) -> EngineError { + EngineError::new_container_registry_error(event_details, err) +} + pub struct ContainerRegistryInfo { pub endpoint: Url, // Contains username and password if necessary pub registry_name: String, @@ -46,6 +52,9 @@ pub struct ContainerRegistryInfo { // 
i.e: fo scaleway => image_name/image_name // i.e: for AWS => image_name pub get_image_name: Box String>, + + // Give it the name of your image, and it returns the name of the repository that will be used + pub get_repository_name: Box String>, } pub struct PushResult { diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 148000e1..dd07b607 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -52,6 +52,7 @@ impl ScalewayCR { zone.region().as_str(), )), get_image_name: Box::new(move |img_name| format!("{}/{}", img_name, img_name)), + get_repository_name: Box::new(|img_name| img_name.to_string()), }; let cr = ScalewayCR { diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 3cc36601..208c55d3 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -1732,6 +1732,24 @@ impl EngineError { /// /// * `event_details`: Error linked event details. /// * `error`: Raw error message. + pub fn new_container_registry_error(event_details: EventDetails, error: ContainerRegistryError) -> EngineError { + EngineError::new( + event_details, + Tag::HelmChartUninstallError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + + /// Creates new error from a Container Registry error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message. 
pub fn new_helm_error(event_details: EventDetails, error: HelmError) -> EngineError { let cmd_error = match &error { HelmError::CmdError(_, _, cmd_error) => Some(cmd_error.clone()), diff --git a/src/models.rs b/src/models.rs index 0e830377..80b31ce7 100644 --- a/src/models.rs +++ b/src/models.rs @@ -14,11 +14,26 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::build_platform::{Build, BuildOptions, Credentials, GitRepository, Image, SshKey}; -use crate::cloud_provider::aws::databases::mongodb::MongoDB; -use crate::cloud_provider::aws::databases::mysql::MySQL; -use crate::cloud_provider::aws::databases::postgresql::PostgreSQL; -use crate::cloud_provider::aws::databases::redis::Redis; -use crate::cloud_provider::service::{DatabaseOptions, StatefulService, StatelessService}; +use crate::cloud_provider::aws::application::ApplicationAws; +use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; +use crate::cloud_provider::aws::databases::mysql::MySQLAws; +use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; +use crate::cloud_provider::aws::databases::redis::RedisAws; +use crate::cloud_provider::aws::router::RouterAws; +use crate::cloud_provider::digitalocean::application::ApplicationDo; +use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; +use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; +use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; +use crate::cloud_provider::digitalocean::databases::redis::RedisDo; +use crate::cloud_provider::digitalocean::router::RouterDo; +use crate::cloud_provider::environment::Environment; +use crate::cloud_provider::scaleway::application::ApplicationScw; +use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; +use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; +use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; +use crate::cloud_provider::scaleway::databases::redis::RedisScw; +use 
crate::cloud_provider::scaleway::router::RouterScw; +use crate::cloud_provider::service::DatabaseOptions; use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; @@ -78,7 +93,7 @@ impl Display for QoveryIdentifier { } #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct Environment { +pub struct EnvironmentRequest { pub execution_id: String, pub id: String, pub owner_id: String, @@ -91,53 +106,44 @@ pub struct Environment { pub clone_from_environment_id: Option, } -impl Environment { - pub fn to_qe_environment( +impl EnvironmentRequest { + pub fn to_environment_domain( &self, context: &Context, cloud_provider: &dyn CloudProvider, container_registry: &ContainerRegistryInfo, logger: Box, - ) -> crate::cloud_provider::environment::Environment { + ) -> Environment { + //FIXME: remove those flatten as it hide errors regarding conversion to model data type let applications = self .applications .iter() - .map(|x| x.to_stateless_service(context, x.to_image(container_registry), cloud_provider, logger.clone())) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) + .filter_map(|x| { + x.to_application_domain(context, x.to_build(container_registry), cloud_provider, logger.clone()) + }) .collect::>(); let routers = self .routers .iter() - .map(|x| x.to_stateless_service(context, cloud_provider, logger.clone())) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) + .filter_map(|x| x.to_router_domain(context, cloud_provider, logger.clone())) .collect::>(); - // orders is important, first external services, then applications and then routers. - let mut stateless_services = applications; - // routers are deployed lastly to avoid to be blacklisted if we request TLS certificates - // while an app does not start for some reason. 
- stateless_services.extend(routers); - let databases = self .databases .iter() - .map(|x| x.to_stateful_service(context, cloud_provider, logger.clone())) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) + .filter_map(|x| x.to_database_domain(context, cloud_provider, logger.clone())) .collect::>(); - let stateful_services = databases; - - crate::cloud_provider::environment::Environment::new( + Environment::new( self.id.as_str(), self.project_id.as_str(), self.owner_id.as_str(), self.organization_id.as_str(), - stateless_services, - stateful_services, + self.action.to_service_action(), + applications, + routers, + databases, ) } } @@ -211,18 +217,18 @@ pub struct Application { } impl Application { - pub fn to_application<'a>( + pub fn to_application_domain( &self, context: &Context, - image: &Image, + build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(crate::cloud_provider::aws::application::Application::new( + CPKind::Aws => Some(Box::new(ApplicationAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -234,124 +240,48 @@ impl Application { self.min_instances, self.max_instances, self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - listeners, - logger, - ))), - CPKind::Do => Some(Box::new( - crate::cloud_provider::digitalocean::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| s.to_do_storage()).collect::>(), - 
environment_variables, - listeners, - logger, - ), - )), - CPKind::Scw => Some(Box::new( - crate::cloud_provider::scaleway::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger, - ), - )), - } - } - - pub fn to_stateless_service( - &self, - context: &Context, - image: Image, - cloud_provider: &dyn CloudProvider, - logger: Box, - ) -> Option> { - let environment_variables = to_environment_variable(&self.environment_vars); - let listeners = cloud_provider.listeners().clone(); - - match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(crate::cloud_provider::aws::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, + build, self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), environment_variables, listeners, logger.clone(), ))), - CPKind::Do => Some(Box::new( - crate::cloud_provider::digitalocean::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, - self.storage.iter().map(|s| s.to_do_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ), - )), - CPKind::Scw => Some(Box::new( - 
crate::cloud_provider::scaleway::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ), - )), + CPKind::Do => Some(Box::new(ApplicationDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + listeners, + logger.clone(), + ))), + CPKind::Scw => Some(Box::new(ApplicationScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + listeners, + logger.clone(), + ))), } } @@ -369,6 +299,7 @@ impl Application { registry_name: cr_info.registry_name.clone(), registry_url: cr_info.endpoint.clone(), registry_docker_json_config: cr_info.registry_docker_json_config.clone(), + repository_name: (cr_info.get_repository_name)(&self.name), } } @@ -400,18 +331,14 @@ impl Application { let passphrase = self .environment_vars .get(&ssh_key_name.replace(ENV_GIT_PREFIX, "GIT_SSH_PASSPHRASE")) - .map(|val| base64::decode(val).ok()) - .flatten() - .map(|str| String::from_utf8(str).ok()) - .flatten(); + .and_then(|val| base64::decode(val).ok()) + .and_then(|str| 
String::from_utf8(str).ok()); let public_key = self .environment_vars .get(&ssh_key_name.replace(ENV_GIT_PREFIX, "GIT_SSH_PUBLIC_KEY")) - .map(|val| base64::decode(val).ok()) - .flatten() - .map(|str| String::from_utf8(str).ok()) - .flatten(); + .and_then(|val| base64::decode(val).ok()) + .and_then(|str| String::from_utf8(str).ok()); ssh_keys.push(SshKey { private_key, @@ -459,7 +386,7 @@ impl Application { .iter() .map(|(k, v)| crate::build_platform::EnvironmentVariable { key: k.clone(), - value: String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or(vec![])).into_owned(), + value: String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or_default()).into_owned(), }) .collect::>(), }, @@ -573,12 +500,12 @@ pub struct Router { } impl Router { - pub fn to_stateless_service( + pub fn to_router_domain( &self, context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let custom_domains = self .custom_domains .iter() @@ -601,7 +528,7 @@ impl Router { match cloud_provider.kind() { CPKind::Aws => { - let router: Box = Box::new(crate::cloud_provider::aws::router::Router::new( + let router = Box::new(RouterAws::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -616,23 +543,22 @@ impl Router { Some(router) } CPKind::Do => { - let router: Box = - Box::new(crate::cloud_provider::digitalocean::router::Router::new( - context.clone(), - self.id.as_str(), - self.name.as_str(), - self.action.to_service_action(), - self.default_domain.as_str(), - custom_domains, - routes, - self.sticky_sessions_enabled, - listeners, - logger, - )); + let router = Box::new(RouterDo::new( + context.clone(), + self.id.as_str(), + self.name.as_str(), + self.action.to_service_action(), + self.default_domain.as_str(), + custom_domains, + routes, + self.sticky_sessions_enabled, + listeners, + logger, + )); Some(router) } CPKind::Scw => { - let router: Box = Box::new(crate::cloud_provider::scaleway::router::Router::new( + let router = 
Box::new(RouterScw::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -695,12 +621,12 @@ pub struct Database { } impl Database { - pub fn to_stateful_service( + pub fn to_database_domain( &self, context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let database_options = DatabaseOptions { mode: self.mode.clone(), login: self.username.clone(), @@ -720,7 +646,7 @@ impl Database { match cloud_provider.kind() { CPKind::Aws => match self.kind { DatabaseKind::Postgresql => { - let db: Box = Box::new(PostgreSQL::new( + let db = Box::new(PostgreSQLAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -739,7 +665,7 @@ impl Database { Some(db) } DatabaseKind::Mysql => { - let db: Box = Box::new(MySQL::new( + let db = Box::new(MySQLAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -758,7 +684,7 @@ impl Database { Some(db) } DatabaseKind::Mongodb => { - let db: Box = Box::new(MongoDB::new( + let db = Box::new(MongoDbAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -777,7 +703,7 @@ impl Database { Some(db) } DatabaseKind::Redis => { - let db: Box = Box::new(Redis::new( + let db = Box::new(RedisAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -798,83 +724,78 @@ impl Database { }, CPKind::Do => match self.kind { DatabaseKind::Postgresql => { - let db: Box = Box::new( - crate::cloud_provider::digitalocean::databases::postgresql::PostgreSQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - ), - ); + let db = Box::new(PostgresDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + 
self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Mysql => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::mysql::MySQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(MySQLDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Redis => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::redis::Redis::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(RedisDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Mongodb => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::mongodb::MongoDB::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - 
self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(MongoDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } @@ -882,22 +803,21 @@ impl Database { CPKind::Scw => match self.kind { DatabaseKind::Postgresql => match VersionsNumber::from_str(self.version.as_str()) { Ok(v) => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::postgresql::PostgreSQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); + let db = Box::new(PostgresScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + v, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger.clone(), + )); Some(db) } @@ -911,22 +831,21 @@ impl Database { }, DatabaseKind::Mysql => match VersionsNumber::from_str(self.version.as_str()) { Ok(v) => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::mysql::MySQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - 
logger.clone(), - )); + let db = Box::new(MySQLScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + v, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger.clone(), + )); Some(db) } @@ -939,42 +858,40 @@ impl Database { } }, DatabaseKind::Redis => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::redis::Redis::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); + let db = Box::new(RedisScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger.clone(), + )); Some(db) } DatabaseKind::Mongodb => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::mongodb::MongoDB::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(MongoDbScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } @@ 
-1255,7 +1172,7 @@ impl Context { pub fn resource_expiration_in_seconds(&self) -> Option { match &self.metadata { - Some(meta) => meta.resource_expiration_in_seconds.map(|ttl| ttl), + Some(meta) => meta.resource_expiration_in_seconds, _ => None, } } @@ -1358,13 +1275,13 @@ impl Domain { } fn is_wildcarded(&self) -> bool { - self.raw.starts_with("*") + self.raw.starts_with('*') } } impl Display for Domain { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(&self.raw.as_str()) + f.write_str(self.raw.as_str()) } } @@ -1382,7 +1299,7 @@ impl ToHelmString for Domain { impl ToTerraformString for Ipv4Addr { fn to_terraform_format_string(&self) -> String { - format!("{{{}}}", self.to_string()) + format!("{{{}}}", self) } } diff --git a/src/transaction.rs b/src/transaction.rs index b646f388..4a9f04d8 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,20 +1,23 @@ +use crate::cloud_provider::environment::Environment; +use std::cell::RefCell; +use std::rc::Rc; use std::thread; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::Service; +use crate::cloud_provider::service::{Action, Service}; +use crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::to_engine_error; use crate::engine::{EngineConfig, EngineConfigError}; use crate::errors::{EngineError, Tag}; -use crate::events::{EngineEvent, EventMessage}; +use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, Transmitter}; use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Action, Environment, EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::models::{EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier}; pub struct Transaction<'a> { engine: &'a EngineConfig, logger: Box, - steps: Vec>, - executed_steps: Vec>, + steps: Vec, + executed_steps: Vec, current_step: StepName, 
is_transaction_aborted: Box bool>, on_step_change: Box, @@ -46,6 +49,19 @@ impl<'a> Transaction<'a> { Ok(tx) } + fn get_event_details(&self, stage: Stage, transmitter: Transmitter) -> EventDetails { + let context = self.engine.context(); + EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + transmitter, + ) + } + pub fn set_current_step(&mut self, step: StepName) { (self.on_step_change)(&step); self.current_step = step; @@ -66,7 +82,7 @@ impl<'a> Transaction<'a> { Ok(()) } - pub fn deploy_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { + pub fn deploy_environment(&mut self, environment: &Rc>) -> Result<(), EnvironmentError> { self.deploy_environment_with_options( environment, DeploymentOption { @@ -78,39 +94,39 @@ impl<'a> Transaction<'a> { pub fn deploy_environment_with_options( &mut self, - environment: &'a Environment, + environment: &Rc>, option: DeploymentOption, ) -> Result<(), EnvironmentError> { // add build step - self.steps.push(Step::BuildEnvironment(environment, option)); + self.steps.push(Step::BuildEnvironment(environment.clone(), option)); // add deployment step - self.steps.push(Step::DeployEnvironment(environment)); + self.steps.push(Step::DeployEnvironment(environment.clone())); Ok(()) } - pub fn pause_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { - self.steps.push(Step::PauseEnvironment(environment)); + pub fn pause_environment(&mut self, environment: &Rc>) -> Result<(), EnvironmentError> { + self.steps.push(Step::PauseEnvironment(environment.clone())); Ok(()) } - pub fn delete_environment(&mut self, environment: &'a Environment) -> Result<(), EnvironmentError> { - self.steps.push(Step::DeleteEnvironment(environment)); + pub fn delete_environment(&mut self, environment: &Rc>) -> Result<(), 
EnvironmentError> { + self.steps.push(Step::DeleteEnvironment(environment.clone())); Ok(()) } fn build_and_push_applications( &self, - environment: &Environment, + environment: &mut Environment, option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications - let apps_to_build = environment + let mut apps_to_build = environment .applications - .iter() + .iter_mut() // build only applications that are set with Action: Create - .filter(|app| app.action == Action::Create) + .filter(|app| *app.action() == Action::Create) .collect::>(); // If nothing to build, do nothing @@ -118,27 +134,40 @@ impl<'a> Transaction<'a> { return Ok(()); } + // To convert ContainerError to EngineError + let cr_to_engine_error = |err: ContainerRegistryError| -> EngineError { + let event_details = self.get_event_details( + Stage::Environment(EnvironmentStep::Build), + Transmitter::ContainerRegistry( + self.engine.container_registry().id().to_string(), + self.engine.container_registry().name().to_string(), + ), + ); + to_engine_error(event_details, err) + }; + // Do setup of registry and be sure we are login to the registry let cr_registry = self.engine.container_registry(); - let _ = cr_registry.create_registry(); - let registry = self.engine.container_registry().registry_info(); - - for app in apps_to_build.into_iter() { - let app_build = app.to_build(®istry); + let _ = cr_registry.create_registry().map_err(cr_to_engine_error)?; + for app in apps_to_build.iter_mut() { // If image already exist in the registry, skip the build - if !option.force_build && cr_registry.does_image_exists(&app_build.image) { + if !option.force_build && cr_registry.does_image_exists(&app.get_build().image) { continue; } // Be sure that our repository exist before trying to pull/push images from it - let _ = self.engine.container_registry().create_repository(&app.name); + let _ = self + .engine + .container_registry() + .create_repository(app.get_build().image.repository_name()) + 
.map_err(cr_to_engine_error)?; // Ok now everything is setup, we can try to build the app let _ = self .engine .build_platform() - .build(app_build, &self.is_transaction_aborted)?; + .build(app.get_build_mut(), &self.is_transaction_aborted)?; } Ok(()) @@ -170,13 +199,13 @@ impl<'a> Transaction<'a> { } Step::DeployEnvironment(environment_action) => { // revert environment deployment - self.rollback_environment(*environment_action)?; + self.rollback_environment(&(environment_action.as_ref().borrow()))?; } Step::PauseEnvironment(environment_action) => { - self.rollback_environment(*environment_action)?; + self.rollback_environment(&(environment_action.as_ref().borrow()))?; } Step::DeleteEnvironment(environment_action) => { - self.rollback_environment(*environment_action)?; + self.rollback_environment(&(environment_action.as_ref().borrow()))?; } } } @@ -187,32 +216,10 @@ impl<'a> Transaction<'a> { // Warning: This function function does not revert anything, it just there to grab info from kube and services if it fails // FIXME: Cleanup this, qe_environment should not be rebuilt at this step fn rollback_environment(&self, environment: &Environment) -> Result<(), RollbackError> { - let registry_info = self.engine.container_registry().registry_info(); - - let qe_environment = |environment: &Environment| { - let qe_environment = environment.to_qe_environment( - self.engine.context(), - self.engine.cloud_provider(), - ®istry_info, - self.logger.clone(), - ); - - qe_environment - }; - - // revert changes but there is no failover environment - let target_qe_environment = qe_environment(environment); - let action = match environment.action { - Action::Create => self - .engine - .kubernetes() - .deploy_environment_error(&target_qe_environment), - Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), - Action::Delete => self - .engine - .kubernetes() - .delete_environment_error(&target_qe_environment), + Action::Create => 
self.engine.kubernetes().deploy_environment_error(&environment), + Action::Pause => self.engine.kubernetes().pause_environment_error(&environment), + Action::Delete => self.engine.kubernetes().delete_environment_error(&environment), Action::Nothing => Ok(()), }; @@ -261,13 +268,13 @@ impl<'a> Transaction<'a> { } }; } - Step::BuildEnvironment(target_environment, option) => { + Step::BuildEnvironment(environment, option) => { if (self.is_transaction_aborted)() { return TransactionResult::Canceled; } // build applications - match self.build_and_push_applications(target_environment, &option) { + match self.build_and_push_applications(&mut (environment.as_ref().borrow_mut()), &option) { Ok(apps) => apps, Err(engine_err) => { self.logger.log( @@ -294,7 +301,7 @@ impl<'a> Transaction<'a> { } // deploy complete environment - match self.commit_environment(environment_action, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().deploy_environment(qe_env) }) { TransactionResult::Ok => {} @@ -310,7 +317,7 @@ impl<'a> Transaction<'a> { } // pause complete environment - match self.commit_environment(environment_action, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().pause_environment(qe_env) }) { TransactionResult::Ok => {} @@ -326,7 +333,7 @@ impl<'a> Transaction<'a> { } // delete complete environment - match self.commit_environment(environment_action, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().delete_environment(qe_env) }) { TransactionResult::Ok => {} @@ -400,25 +407,17 @@ impl<'a> Transaction<'a> { } } - fn commit_environment(&self, target_environment: &Environment, action_fn: F) -> TransactionResult + fn commit_environment(&self, environment: &Environment, action_fn: F) -> TransactionResult where - F: Fn(&crate::cloud_provider::environment::Environment) -> 
Result<(), EngineError>, + F: Fn(&Environment) -> Result<(), EngineError>, { - let registry_info = self.engine.container_registry().registry_info(); - let qe_environment = target_environment.to_qe_environment( - self.engine.context(), - self.engine.cloud_provider(), - ®istry_info, - self.logger.clone(), - ); - let execution_id = self.engine.context().execution_id(); // send back the right progress status fn send_progress( kubernetes: &dyn Kubernetes, action: &Action, - service: &Box, + service: &T, execution_id: &str, is_error: bool, ) where @@ -455,7 +454,7 @@ impl<'a> Transaction<'a> { // Even by storing data at the micro seconds precision thread::sleep(std::time::Duration::from_millis(100)); - let _ = match action_fn(&qe_environment) { + let _ = match action_fn(&environment) { Err(err) => { let rollback_result = match self.rollback() { Ok(_) => TransactionResult::Rollback(err), @@ -467,20 +466,20 @@ impl<'a> Transaction<'a> { // !!! don't change the order // terminal update - for service in &qe_environment.stateful_services { + for service in environment.stateful_services() { send_progress( self.engine.kubernetes(), - &target_environment.action, + &environment.action, service, execution_id, true, ); } - for service in &qe_environment.stateless_services { + for service in environment.stateless_services() { send_progress( self.engine.kubernetes(), - &target_environment.action, + &environment.action, service, execution_id, true, @@ -491,20 +490,20 @@ impl<'a> Transaction<'a> { } _ => { // terminal update - for service in &qe_environment.stateful_services { + for service in environment.stateful_services() { send_progress( self.engine.kubernetes(), - &target_environment.action, + &environment.action, service, execution_id, false, ); } - for service in &qe_environment.stateless_services { + for service in environment.stateless_services() { send_progress( self.engine.kubernetes(), - &target_environment.action, + &environment.action, service, execution_id, false, @@ 
-550,18 +549,18 @@ impl StepName { } } -pub enum Step<'a> { +pub enum Step { // init and create all the necessary resources (Network, Kubernetes) CreateKubernetes, DeleteKubernetes, PauseKubernetes, - BuildEnvironment(&'a Environment, DeploymentOption), - DeployEnvironment(&'a Environment), - PauseEnvironment(&'a Environment), - DeleteEnvironment(&'a Environment), + BuildEnvironment(Rc>, DeploymentOption), + DeployEnvironment(Rc>), + PauseEnvironment(Rc>), + DeleteEnvironment(Rc>), } -impl<'a> Step<'a> { +impl Step { fn step_name(&self) -> StepName { match self { Step::CreateKubernetes => StepName::CreateKubernetes, @@ -575,16 +574,16 @@ impl<'a> Step<'a> { } } -impl<'a> Clone for Step<'a> { +impl Clone for Step { fn clone(&self) -> Self { match self { Step::CreateKubernetes => Step::CreateKubernetes, Step::DeleteKubernetes => Step::DeleteKubernetes, Step::PauseKubernetes => Step::PauseKubernetes, - Step::BuildEnvironment(e, option) => Step::BuildEnvironment(*e, option.clone()), - Step::DeployEnvironment(e) => Step::DeployEnvironment(*e), - Step::PauseEnvironment(e) => Step::PauseEnvironment(*e), - Step::DeleteEnvironment(e) => Step::DeleteEnvironment(*e), + Step::BuildEnvironment(e, option) => Step::BuildEnvironment(e.clone(), option.clone()), + Step::DeployEnvironment(e) => Step::DeployEnvironment(e.clone()), + Step::PauseEnvironment(e) => Step::PauseEnvironment(e.clone()), + Step::DeleteEnvironment(e) => Step::DeleteEnvironment(e.clone()), } } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index b3631da5..176352dc 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -2,12 +2,13 @@ extern crate serde; extern crate serde_derive; use chrono::Utc; +use std::cell::RefCell; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::models::{ - Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, Environment, 
GitCredentials, - Port, Protocol, Route, Router, Storage, StorageType, + Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, + GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; @@ -39,6 +40,7 @@ use qovery_engine::models::DatabaseMode::CONTAINER; use qovery_engine::transaction::{DeploymentOption, Transaction, TransactionResult}; use std::collections::BTreeMap; use std::path::Path; +use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use tracing::{span, Level}; @@ -70,34 +72,42 @@ pub trait Cluster { pub trait Infrastructure { fn deploy_environment( &self, - environment: &Environment, + environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn pause_environment( &self, - environment: &Environment, + environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; fn delete_environment( &self, - environment: &Environment, + environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; } -impl Infrastructure for Environment { +impl Infrastructure for EnvironmentRequest { fn deploy_environment( &self, - environment: &Environment, + environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = environment.to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ); + + let env = Rc::new(RefCell::new(env)); let _ = tx.deploy_environment_with_options( - &environment, + &env, DeploymentOption { force_build: true, force_push: true, @@ -109,24 +119,38 @@ impl Infrastructure for Environment { fn pause_environment( &self, - environment: &Environment, + environment: 
&EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.pause_environment(&environment); + let env = environment.to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ); + let env = Rc::new(RefCell::new(env)); + let _ = tx.pause_environment(&env); tx.commit() } fn delete_environment( &self, - environment: &Environment, + environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.delete_environment(&environment); + let env = environment.to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ); + let env = Rc::new(RefCell::new(env)); + let _ = tx.delete_environment(&env); tx.commit() } @@ -144,7 +168,7 @@ pub fn environment_3_apps_3_routers_3_databases( database_instance_type: &str, database_disk_type: &str, provider_kind: Kind, -) -> Environment { +) -> EnvironmentRequest { let app_name_1 = format!("{}-{}", "simple-app-1".to_string(), generate_id()); let app_name_2 = format!("{}-{}", "simple-app-2".to_string(), generate_id()); let app_name_3 = format!("{}-{}", "simple-app-3".to_string(), generate_id()); @@ -177,7 +201,7 @@ pub fn environment_3_apps_3_routers_3_databases( let database_username_2 = "superuser2".to_string(); let database_name_2 = "postgres2".to_string(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -439,7 +463,7 @@ pub fn environment_3_apps_3_routers_3_databases( } } -pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Environment { +pub 
fn working_minimal_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); @@ -451,7 +475,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi context.cluster_id().to_string(), test_domain ); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -509,12 +533,12 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi } } -pub fn database_test_environment(context: &Context) -> Environment { +pub fn database_test_environment(context: &Context) -> EnvironmentRequest { let suffix = generate_id(); let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -552,7 +576,10 @@ pub fn database_test_environment(context: &Context) -> Environment { } } -pub fn environment_only_http_server_router_with_sticky_session(context: &Context, test_domain: &str) -> Environment { +pub fn environment_only_http_server_router_with_sticky_session( + context: &Context, + test_domain: &str, +) -> EnvironmentRequest { let mut env = environment_only_http_server_router(context, test_domain.clone()); for mut router in &mut env.routers { @@ -568,7 +595,7 @@ pub fn environnement_2_app_2_routers_1_psql( database_instance_type: &str, database_disk_type: &str, provider_kind: Kind, -) -> Environment { +) -> EnvironmentRequest { let fqdn = get_svc_name(DatabaseKind::Postgresql, provider_kind.clone()).to_string(); let database_port = 5432; @@ -580,7 +607,7 @@ pub fn environnement_2_app_2_routers_1_psql( let application_name1 = sanitize_name("postgresql", &format!("{}-{}", "postgresql-app1", &suffix)); let application_name2 = 
sanitize_name("postgresql", &format!("{}-{}", "postgresql-app2", &suffix)); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -735,7 +762,7 @@ pub fn environnement_2_app_2_routers_1_psql( } } -pub fn non_working_environment(context: &Context, test_domain: &str) -> Environment { +pub fn non_working_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let mut environment = working_minimal_environment(context, test_domain); environment.applications = environment @@ -754,9 +781,9 @@ pub fn non_working_environment(context: &Context, test_domain: &str) -> Environm // echo app environment is an environment that contains http-echo container (forked from hashicorp) // ECHO_TEXT var will be the content of the application root path -pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment { +pub fn echo_app_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -817,9 +844,9 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment } } -pub fn environment_only_http_server(context: &Context) -> Environment { +pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -866,9 +893,9 @@ pub fn environment_only_http_server(context: &Context) -> Environment { } } -pub fn environment_only_http_server_router(context: &Context, test_domain: &str) -> Environment { +pub fn environment_only_http_server_router(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: 
context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -963,7 +990,7 @@ pub fn routers_sessions_are_sticky(routers: Vec) -> bool { pub fn test_db( context: Context, logger: Box, - mut environment: Environment, + mut environment: EnvironmentRequest, secrets: FuncTestsSecrets, version: &str, test_name: &str, @@ -1369,7 +1396,7 @@ pub fn cluster_test( minor_boot_version: u8, cluster_domain: &ClusterDomain, vpc_network_mode: Option, - environment_to_deploy: Option<&Environment>, + environment_to_deploy: Option<&EnvironmentRequest>, ) -> String { init(); @@ -1425,7 +1452,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - if let Err(err) = deploy_env_tx.deploy_environment(env) { + let env = env.to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ); + let env = Rc::new(RefCell::new(env)); + if let Err(err) = deploy_env_tx.deploy_environment(&env) { panic!("{:?}", err) } @@ -1537,7 +1571,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - if let Err(err) = destroy_env_tx.delete_environment(env) { + let env = env.to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ); + let env = Rc::new(RefCell::new(env)); + if let Err(err) = destroy_env_tx.delete_environment(&env) { panic!("{:?}", err) } assert!(matches!(destroy_env_tx.commit(), TransactionResult::Ok)); diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 481d76ec..e5ac3dc1 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -7,7 +7,7 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use 
qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, Environment}; +use qovery_engine::models::{Context, EnvironmentRequest}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; @@ -162,7 +162,7 @@ impl Cluster for DO { pub fn clean_environments( context: &Context, - _environments: Vec, + _environments: Vec, secrets: FuncTestsSecrets, _region: DoRegion, ) -> Result<(), EngineError> { diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 86e4f27d..b9159a03 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -6,7 +6,7 @@ use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, Environment}; +use qovery_engine::models::{Context, EnvironmentRequest}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; @@ -222,7 +222,7 @@ pub fn scw_object_storage(context: Context, region: ScwZone) -> ScalewayOS { pub fn clean_environments( context: &Context, - environments: Vec, + environments: Vec, secrets: FuncTestsSecrets, zone: ScwZone, ) -> Result<(), ContainerRegistryError> { diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index df9a94e5..5d72e959 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -37,7 +37,7 @@ use qovery_engine::constants::{ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID, DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY, }; -use qovery_engine::models::{Context, Database, DatabaseKind, DatabaseMode, Environment, Features, Metadata}; +use 
qovery_engine::models::{Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, Features, Metadata}; use retry::Error::Operation; use serde::{Deserialize, Serialize}; @@ -454,7 +454,7 @@ pub fn generate_password(provider_kind: Kind, db_mode: DatabaseMode) -> String { password } -pub fn check_all_connections(env: &Environment) -> Vec { +pub fn check_all_connections(env: &EnvironmentRequest) -> Vec { let mut checking: Vec = Vec::with_capacity(env.routers.len()); for router_to_test in &env.routers { @@ -795,7 +795,7 @@ fn aws_s3_get_object( pub fn is_pod_restarted_env( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, pod_to_check: &str, secrets: FuncTestsSecrets, ) -> (bool, String) { @@ -830,7 +830,7 @@ pub fn is_pod_restarted_env( pub fn get_pods( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, pod_to_check: &str, secrets: FuncTestsSecrets, ) -> Result, CommandError> { @@ -903,7 +903,7 @@ pub fn generate_cluster_id(region: &str) -> String { pub fn get_pvc( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { let namespace_name = format!( @@ -932,7 +932,7 @@ pub fn get_pvc( pub fn get_svc( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { let namespace_name = format!( From 7feaf7a0d533a2e6f343bd54cab7020c96d48013 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 23 Mar 2022 10:47:12 +0100 Subject: [PATCH 46/85] Fix ECR repository creation (#657) --- src/container_registry/ecr.rs | 46 ++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 4f82b3ce..7c3273a2 100644 --- 
a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -127,7 +127,6 @@ impl ECR { } fn create_repository(&self, repository_name: &str) -> Result { - let mut repo_creation_counter = 0; let container_registry_request = DescribeRepositoriesRequest { repository_names: Some(vec![repository_name.to_string()]), ..Default::default() @@ -140,36 +139,39 @@ impl ECR { // ensure repository is created // need to do all this checks and retry because of several issues encountered like: 200 API response code while repo is not created let repo_created = retry::retry(Fixed::from_millis(5000).take(24), || { - match block_on( + let repositories = block_on( self.ecr_client() .describe_repositories(container_registry_request.clone()), - ) { - Ok(_x) => OperationResult::Ok(()), - Err(e) => { - match e { - RusotoError::Service(s) => match s { - DescribeRepositoriesError::RepositoryNotFound(_) => { - repo_creation_counter += 1; - } - _ => {} - }, - _ => {} - } + ); + match repositories { + // Repo already exist, so ok + Ok(_) => OperationResult::Ok(()), + // Repo does not exist, so creating it + Err(RusotoError::Service(DescribeRepositoriesError::RepositoryNotFound(_))) => { if let Err(err) = block_on(self.ecr_client().create_repository(crr.clone())) { - return OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { registry_name: self.name.to_string(), repository_name: repository_name.to_string(), raw_error_message: err.to_string(), - })); + })) + } else { + // The Repo should be created at this point, but we want to verify that + // the describe/list return it now. 
we want to reloop so return a retry instead of a ok + OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: "Retry to check repository exist".to_string(), + })) } - - OperationResult::Err(Err(ContainerRegistryError::CannotCreateRepository { - registry_name: self.name.to_string(), - repository_name: repository_name.to_string(), - raw_error_message: "unknwon error".to_string(), - })) } + + // Unknown error, so retries ¯\_(ツ)_/¯ + Err(err) => OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + })), } }); From 5e6951e2585529e90b72fc065aeaf2b6ec8e4070 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 23 Mar 2022 12:33:58 +0100 Subject: [PATCH 47/85] Terraform: Use same version of aws provider everywhere (#658) --- lib/digitalocean/bootstrap/tf-providers.j2.tf | 2 +- lib/scaleway/bootstrap/tf-providers.j2.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/digitalocean/bootstrap/tf-providers.j2.tf b/lib/digitalocean/bootstrap/tf-providers.j2.tf index bd5bf507..5d5f3474 100644 --- a/lib/digitalocean/bootstrap/tf-providers.j2.tf +++ b/lib/digitalocean/bootstrap/tf-providers.j2.tf @@ -15,7 +15,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.36.0" + version = "~> 3.66.0" } digitalocean = { source = "digitalocean/digitalocean" diff --git a/lib/scaleway/bootstrap/tf-providers.j2.tf b/lib/scaleway/bootstrap/tf-providers.j2.tf index 5157bc29..b097c8bf 100644 --- a/lib/scaleway/bootstrap/tf-providers.j2.tf +++ b/lib/scaleway/bootstrap/tf-providers.j2.tf @@ -13,7 +13,7 @@ terraform { } aws = { source = "hashicorp/aws" - version = "~> 3.36.0" + version = "~> 3.66.0" } local = { source = "hashicorp/local" 
From 38fe096477db3c61cbdd4a5a837e47096091acd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 23 Mar 2022 18:04:15 +0100 Subject: [PATCH 48/85] Clean local docker builder (#659) Proper error management Move stuff a bit outside of build_and_push_function security to check user avoid requesting data outside of their build context --- .../{docker.rs => dockerfile_utils.rs} | 70 ++-- src/build_platform/local_docker.rs | 370 ++++++------------ src/build_platform/mod.rs | 36 +- src/container_registry/docr.rs | 4 - src/container_registry/ecr.rs | 21 +- src/container_registry/mod.rs | 10 - .../scaleway_container_registry.rs | 4 - src/engine.rs | 8 - src/errors/io.rs | 4 + src/errors/mod.rs | 25 +- src/transaction.rs | 58 ++- 11 files changed, 263 insertions(+), 347 deletions(-) rename src/build_platform/{docker.rs => dockerfile_utils.rs} (65%) diff --git a/src/build_platform/docker.rs b/src/build_platform/dockerfile_utils.rs similarity index 65% rename from src/build_platform/docker.rs rename to src/build_platform/dockerfile_utils.rs index 973f02fe..621ea33f 100644 --- a/src/build_platform/docker.rs +++ b/src/build_platform/dockerfile_utils.rs @@ -24,7 +24,7 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec) -> Result>(); + let x = arg_value.split('=').collect::>(); x.get(0).unwrap_or(&"").to_string() }) .collect::>(); @@ -35,31 +35,17 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec) -> Result, +pub fn match_used_env_var_args<'a>( + env_var_args: &'a [(&'a str, &'a str)], dockerfile_content: Vec, -) -> Result, Utf8Error> { +) -> Result, Utf8Error> { // extract env vars used in the Dockerfile let used_args = extract_dockerfile_args(dockerfile_content)?; - // match env var args and dockerfile env vargs - let env_var_arg_keys = env_var_args - .iter() - .map(|env_var| env_var.split("=").next().unwrap_or(&"").to_string()) - .collect::>(); + let mut matched_env_args = env_var_args.to_vec(); + matched_env_args.retain(|(k, 
_v)| used_args.contains(*k)); - let matched_env_args_keys = env_var_arg_keys - .intersection(&used_args) - .map(|arg| arg.clone()) - .collect::>(); - - Ok(env_var_args - .into_iter() - .filter(|env_var_arg| { - let env_var_arg_key = env_var_arg.split("=").next().unwrap_or(""); - matched_env_args_keys.contains(env_var_arg_key) - }) - .collect::>()) + Ok(matched_env_args) } #[cfg(test)] @@ -114,26 +100,25 @@ mod tests { assert_eq!(res.unwrap().len(), 4); let env_var_args_to_match = vec![ - "foo=abcdvalue".to_string(), - "bar=abcdvalue".to_string(), - "toto=abcdvalue".to_string(), - "x=abcdvalue".to_string(), + ("foo", "abcdvalue"), + ("bar", "abcdvalue"), + ("toto", "abcdvalue"), + ("x", "abcdvalue"), ]; - let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec()); + let matched_vars = match_used_env_var_args(&env_var_args_to_match, dockerfile.to_vec()); assert_eq!(matched_vars.clone().unwrap(), env_var_args_to_match.clone()); assert_eq!(matched_vars.unwrap().len(), 4); - let matched_vars = match_used_env_var_args( - vec!["toto=abcdvalue".to_string(), "x=abcdvalue".to_string()], - dockerfile.to_vec(), - ); + let args = vec![("toto", "abcdvalue"), ("x", "abcdvalue")]; + let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 2); - let matched_vars = match_used_env_var_args(vec![], dockerfile.to_vec()); + let args = vec![]; + let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 0); @@ -144,7 +129,7 @@ mod tests { RUN ls -lh "; - let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec()); + let matched_vars = match_used_env_var_args(&env_var_args_to_match, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 0); } @@ -180,23 +165,22 @@ mod tests { let res = extract_dockerfile_args(dockerfile.to_vec()); assert_eq!(res.unwrap().len(), 3); - let matched_vars = 
match_used_env_var_args( - vec![ - "PRISMIC_REPO_NAME=abcdvalue".to_string(), - "PRISMIC_API_KEY=abcdvalue".to_string(), - "PRISMIC_CUSTOM_TYPES_API_TOKEN=abcdvalue".to_string(), - ], - dockerfile.to_vec(), - ); + let args = vec![ + ("PRISMIC_REPO_NAME", "abcdvalue"), + ("PRISMIC_API_KEY", "abcdvalue"), + ("PRISMIC_CUSTOM_TYPES_API_TOKEN", "abcdvalue"), + ]; + let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 3); - let matched_vars = - match_used_env_var_args(vec!["PRISMIC_REPO_NAME=abcdvalue".to_string()], dockerfile.to_vec()); + let args = vec![("PRISMIC_REPO_NAME", "abcdvalue")]; + let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 1); - let matched_vars = match_used_env_var_args(vec![], dockerfile.to_vec()); + let args = vec![]; + let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); assert_eq!(matched_vars.unwrap().len(), 0); } diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 22d2d924..43f5a991 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,12 +6,11 @@ use std::{env, fs}; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; -use crate::build_platform::{docker, Build, BuildPlatform, Credentials, Kind}; +use crate::build_platform::{dockerfile_utils, Build, BuildError, BuildPlatform, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; use crate::cmd::docker::{ContainerImage, Docker, DockerError}; -use crate::errors::{CommandError, EngineError, Tag}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; @@ -40,12 +39,7 @@ pub struct LocalDocker { } impl LocalDocker { - pub fn new( - context: Context, - id: &str, - name: &str, - 
logger: Box, - ) -> Result> { + pub fn new(context: Context, id: &str, name: &str, logger: Box) -> Result { Ok(LocalDocker { context, id: id.to_string(), @@ -63,20 +57,45 @@ impl LocalDocker { } } - /// Read Dockerfile content from location path and return an array of bytes - fn get_dockerfile_content(&self, dockerfile_path: &str) -> Result, EngineError> { - match fs::read(dockerfile_path) { - Ok(bytes) => Ok(bytes), - Err(err) => { - let engine_error = EngineError::new_docker_cannot_read_dockerfile( + fn reclaim_space_if_needed(&self) { + if env::var_os("CI").is_some() { + self.logger.log( + LogLevel::Info, + EngineEvent::Info( self.get_event_details(), - dockerfile_path.to_string(), - CommandError::new(err.to_string(), None), - ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(engine_error.clone(), None)); - Err(engine_error) - } + EventMessage::new_from_safe( + "CI environment variable found, no docker prune will be made".to_string(), + ), + ), + ); + + return; + } + + // ensure there is enough disk space left before building a new image + let docker_path_string = "/var/lib/docker"; + let docker_path = Path::new(docker_path_string); + + // get system info + let mut system = sysinfo::System::new_all(); + system.refresh_all(); + + for disk in system.get_disks() { + if disk.get_mount_point() == docker_path { + let event_details = self.get_event_details(); + if let Err(e) = check_docker_space_usage_and_clean( + &self.context.docker, + disk, + event_details.clone(), + &*self.logger(), + ) { + self.logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details, EventMessage::new(e.to_string(), Some(e.to_string()))), + ); + } + break; + }; } } @@ -85,10 +104,10 @@ impl LocalDocker { build: &Build, dockerfile_complete_path: &str, into_dir_docker_style: &str, - env_var_args: Vec, + env_var_args: &[(&str, &str)], lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result<(), EngineError> { + ) -> Result<(), BuildError> { let image_to_build = 
ContainerImage { registry: build.image.registry_url.clone(), name: build.image.name(), @@ -101,38 +120,26 @@ impl LocalDocker { tags: vec!["latest".to_string()], }; - let dockerfile_content = self.get_dockerfile_content(dockerfile_complete_path)?; - let env_var_args = match docker::match_used_env_var_args(env_var_args, dockerfile_content) { + let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { + BuildError::IoError( + build.image.application_id.clone(), + "reading dockerfile content".to_string(), + err, + ) + })?; + let env_var_args = match dockerfile_utils::match_used_env_var_args(env_var_args, dockerfile_content) { Ok(env_var_args) => env_var_args, Err(err) => { - let engine_error = EngineError::new_docker_cannot_extract_env_vars_from_dockerfile( - self.get_event_details(), - dockerfile_complete_path.to_string(), - CommandError::new(err.to_string(), None), - ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(engine_error.clone(), None)); - return Err(engine_error); + let msg = format!("Cannot extract env vars from your dockerfile {}", err); + return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); } }; - // FIXME: pass a Vec<(key, value)> instead of spliting always the string - let env_vars = env_var_args - .into_iter() - .map(|val| { - let (key, value) = val.rsplit_once('=').unwrap(); - (key.to_string(), value.to_string()) - }) - .collect::>(); - let exit_status = self.context.docker.build( - &Path::new(dockerfile_complete_path), - &Path::new(into_dir_docker_style), + Path::new(dockerfile_complete_path), + Path::new(into_dir_docker_style), &image_to_build, - &env_vars - .iter() - .map(|(k, v)| (k.as_str(), v.as_str())) - .collect::>(), + &env_var_args, &image_cache, true, &mut |line| { @@ -170,12 +177,8 @@ impl LocalDocker { match exit_status { Ok(_) => Ok(()), - Err(DockerError::Aborted(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), - Err(err) => 
Err(EngineError::new_docker_cannot_build_container_image( - self.get_event_details(), - self.name_with_id(), - CommandError::new(format!("{:?}", err), None), - )), + Err(DockerError::Aborted(msg)) => Err(BuildError::Aborted(msg)), + Err(err) => Err(BuildError::DockerError(build.image.application_id.clone(), err)), } } @@ -183,11 +186,11 @@ impl LocalDocker { &self, build: &Build, into_dir_docker_style: &str, - env_var_args: Vec, + env_var_args: &[(&str, &str)], use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result<(), EngineError> { + ) -> Result<(), BuildError> { let name_with_tag = build.image.full_image_name_with_tag(); let name_with_latest_tag = format!("{}:latest", build.image.full_image_name()); @@ -206,19 +209,12 @@ impl LocalDocker { buildpacks_args.extend(vec!["-t", name_with_latest_tag.as_str()]); buildpacks_args.extend(vec!["--path", into_dir_docker_style]); - let mut buildpacks_args = if env_var_args.is_empty() { - buildpacks_args - } else { - let mut build_args = vec![]; - - env_var_args.iter().for_each(|x| { - build_args.push("--env"); - build_args.push(x.as_str()); - }); - - buildpacks_args.extend(build_args); - buildpacks_args - }; + let mut args_buffer = Vec::with_capacity(env_var_args.len()); + for (key, value) in env_var_args { + args_buffer.push("--env".to_string()); + args_buffer.push(format!("{}={}", key, value)); + } + buildpacks_args.extend(args_buffer.iter().map(|value| value.as_str()).collect::>()); buildpacks_args.push("-B"); buildpacks_args.push(builder_name); @@ -240,26 +236,10 @@ impl LocalDocker { } _ => { let msg = format!( - "Cannot build: Invalid buildpacks language format: expected `builder[@version]` got {}", + "Invalid buildpacks language format: expected `builder[@version]` got {}", buildpacks_language ); - lh.deployment_error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Error, - Some(msg.clone()), - 
self.context.execution_id(), - )); - - let err = EngineError::new_buildpack_invalid_language_format( - self.get_event_details(), - buildpacks_language.to_string(), - ); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - return Err(err); + return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); } } } @@ -319,33 +299,22 @@ impl LocalDocker { match exit_status { Ok(_) => Ok(()), - Err(Killed(_)) => Err(EngineError::new_task_cancellation_requested(self.get_event_details())), - Err(err) => { - let error = EngineError::new_buildpack_cannot_build_container_image( - self.get_event_details(), - self.name_with_id(), - BUILDPACKS_BUILDERS.iter().map(|b| b.to_string()).collect(), - CommandError::new(format!("{:?}", err), None), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - Err(error) - } + Err(Killed(msg)) => Err(BuildError::Aborted(msg)), + Err(err) => Err(BuildError::BuildpackError(build.image.application_id.clone(), err)), } } - fn get_repository_build_root_path(&self, build: &Build) -> Result { + fn get_repository_build_root_path(&self, build: &Build) -> Result { workspace_directory( self.context.workspace_root_dir(), self.context.execution_id(), format!("build/{}", build.image.name.as_str()), ) .map_err(|err| { - EngineError::new_cannot_get_workspace_directory( - self.get_event_details(), - CommandError::new(err.to_string(), None), + BuildError::IoError( + build.image.application_id.clone(), + "when creating build workspace".to_string(), + err, ) }) } @@ -368,49 +337,31 @@ impl BuildPlatform for LocalDocker { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { - if !crate::cmd::command::does_binary_exist("docker") { - return Err(EngineError::new_missing_required_binary( - self.get_event_details(), - "docker".to_string(), - )); - } - - if !crate::cmd::command::does_binary_exist("pack") { - return Err(EngineError::new_missing_required_binary( - 
self.get_event_details(), - "pack".to_string(), - )); - } - - Ok(()) - } - - fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), EngineError> { + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError> { let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); let app_id = build.image.application_id.clone(); // check if we should already abort the task if is_task_canceled() { - return Err(EngineError::new_task_cancellation_requested(event_details.clone())); + return Err(BuildError::Aborted(build.image.application_id.clone())); } // LOGGING - let repository_root_path = PathBuf::from(self.get_repository_build_root_path(&build)?); + let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); let msg = format!( "Cloning repository: {} to {:?}", build.git_repository.url, repository_root_path ); listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { id: app_id }, + ProgressScope::Application { id: app_id.clone() }, ProgressLevel::Info, Some(msg.clone()), self.context.execution_id(), )); self.logger.log( LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg)), + EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), ); // LOGGING @@ -419,8 +370,8 @@ impl BuildPlatform for LocalDocker { let get_credentials = |user: &str| { let mut creds: Vec<(CredentialType, Cred)> = Vec::with_capacity(build.git_repository.ssh_keys.len() + 1); for ssh_key in build.git_repository.ssh_keys.iter() { - let public_key = ssh_key.public_key.as_ref().map(|x| x.as_str()); - let passphrase = ssh_key.passphrase.as_ref().map(|x| x.as_str()); + let public_key = ssh_key.public_key.as_deref(); + let passphrase = ssh_key.passphrase.as_deref(); if let Ok(cred) = Cred::ssh_key_from_memory(user, public_key, &ssh_key.private_key, passphrase) { 
creds.push((CredentialType::SSH_MEMORY, cred)); } @@ -429,7 +380,7 @@ impl BuildPlatform for LocalDocker { if let Some(Credentials { login, password }) = &build.git_repository.credentials { creds.push(( CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(&login, &password).unwrap(), + Cred::userpass_plaintext(login, password).unwrap(), )); } @@ -439,7 +390,9 @@ impl BuildPlatform for LocalDocker { // Cleanup, mono repo can require to clone multiple time the same repo // FIXME: re-use the same repo and just checkout at the correct commit if repository_root_path.exists() { - let _ = fs::remove_dir_all(&repository_root_path); + let app_id = app_id; + fs::remove_dir_all(&repository_root_path) + .map_err(|err| BuildError::IoError(app_id, "cleaning old repository".to_string(), err))?; } // Do the real git clone @@ -449,108 +402,55 @@ impl BuildPlatform for LocalDocker { &repository_root_path, &get_credentials, ) { - let error = EngineError::new_builder_clone_repository_error( - self.get_event_details(), - build.git_repository.url.to_string(), - CommandError::new(clone_error.to_string(), None), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + return Err(BuildError::GitError(build.image.application_id.clone(), clone_error)); } if is_task_canceled() { - return Err(EngineError::new_task_cancellation_requested(event_details.clone())); + return Err(BuildError::Aborted(build.image.application_id.clone())); } let mut disable_build_cache = false; - let mut env_var_args: Vec = Vec::with_capacity(build.options.environment_variables.len()); - + let mut env_var_args: Vec<(&str, &str)> = Vec::with_capacity(build.options.environment_variables.len()); for ev in &build.options.environment_variables { if ev.key == "QOVERY_DISABLE_BUILD_CACHE" && ev.value.to_lowercase() == "true" { // this is a special flag to disable build cache dynamically // -- do not pass this env var key/value to as build parameter 
disable_build_cache = true; } else { - env_var_args.push(format!("{}={}", ev.key, ev.value)); + env_var_args.push((&ev.key, &ev.value)); } } // ensure docker_path is a mounted volume, otherwise ignore because it's not what Qovery does in production // ex: this cause regular cleanup on CI, leading to random tests errors - match env::var_os("CI") { - Some(_) => self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "CI environment variable found, no docker prune will be made".to_string(), - ), - ), - ), - None => { - // ensure there is enough disk space left before building a new image - let docker_path_string = "/var/lib/docker"; - let docker_path = Path::new(docker_path_string); - - // get system info - let mut system = sysinfo::System::new_all(); - system.refresh_all(); - - for disk in system.get_disks() { - if disk.get_mount_point() == docker_path { - let event_details = self.get_event_details(); - if let Err(e) = check_docker_space_usage_and_clean( - &self.context.docker, - disk, - event_details.clone(), - &*self.logger(), - ) { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new(e.to_string(), Some(e.to_string())), - ), - ); - } - break; - }; - } - } - } + self.reclaim_space_if_needed(); let app_id = build.image.application_id.clone(); // Check that the build context is correct let build_context_path = repository_root_path.join(&build.git_repository.root_path); if !build_context_path.is_dir() { - listeners_helper.error(ProgressInfo::new( - ProgressScope::Application { id: app_id.clone() }, - ProgressLevel::Error, - Some(format!( - "Application build context is not present at location {:?}", - build_context_path - )), - self.context.execution_id(), - )); - - let error = EngineError::new_docker_cannot_find_dockerfile( - self.get_event_details(), - build_context_path.to_str().unwrap_or_default().to_string(), + let msg = format!( + "Specified build 
context path {:?} does not exist within the repository", + &build.git_repository.root_path ); + return Err(BuildError::InvalidConfig(app_id, msg)); + } - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + // Safety check to ensure we can't go up in the directory + if !build_context_path + .canonicalize() + .unwrap_or_default() + .starts_with(repository_root_path.canonicalize().unwrap_or_default()) + { + let msg = format!( + "Specified build context path {:?} tries to access directory outside of his git repository", + &build.git_repository.root_path + ); + return Err(BuildError::InvalidConfig(app_id, msg)); } // now we have to decide if we use buildpack or docker to build our application - // if dockerfile_path is not present it means we need to use buildpack - // If no Dockerfile specified, we should use BuildPacks let result = if let Some(dockerfile_path) = &build.git_repository.dockerfile_path { // build container from the provided Dockerfile @@ -559,34 +459,18 @@ impl BuildPlatform for LocalDocker { // If the dockerfile does not exist, abort if !dockerfile_absolute_path.is_file() { - listeners_helper.error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Error, - Some(format!( - "Dockerfile is not present at location {}", - dockerfile_absolute_path.display() - )), - self.context.execution_id(), - )); - - let error = EngineError::new_docker_cannot_find_dockerfile( - self.get_event_details(), - dockerfile_absolute_path.to_str().unwrap_or_default().to_string(), + let msg = format!( + "Specified dockerfile path {:?} does not exist within the repository", + &dockerfile_path ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + return Err(BuildError::InvalidConfig(app_id, msg)); } self.build_image_with_docker( build, dockerfile_absolute_path.to_str().unwrap_or_default(), 
build_context_path.to_str().unwrap_or_default(), - env_var_args, + &env_var_args, &listeners_helper, is_task_canceled, ) @@ -595,39 +479,13 @@ impl BuildPlatform for LocalDocker { self.build_image_with_buildpacks( build, build_context_path.to_str().unwrap_or_default(), - env_var_args, + &env_var_args, !disable_build_cache, &listeners_helper, is_task_canceled, ) }; - let msg = match &result { - Ok(_) => format!("✅ Container {} is built", self.name_with_id()), - Err(engine_err) if engine_err.tag() == &Tag::TaskCancellationRequested => { - format!("🚫 Container {} build has been canceled", self.name_with_id()) - } - Err(engine_err) => { - format!( - "❌ Container {} failed to be build: {}", - self.name_with_id(), - engine_err.message() - ) - } - }; - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { id: app_id }, - ProgressLevel::Info, - Some(msg.to_string()), - self.context.execution_id(), - )); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg.to_string())), - ); - result } @@ -666,7 +524,7 @@ fn check_docker_space_usage_and_clean( logger.log( LogLevel::Warning, EngineEvent::Warning( - event_details.clone(), + event_details, EventMessage::new_from_safe(format!( "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", docker_percentage_remaining, docker_max_disk_percentage_usage_before_purge @@ -680,7 +538,7 @@ fn check_docker_space_usage_and_clean( logger.log( LogLevel::Info, EngineEvent::Info( - event_details.clone(), + event_details, EventMessage::new_from_safe(format!( "No need to purge old docker images, only {}% ({}/{}) disk used", 100 - docker_percentage_remaining, diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 50998660..4518e163 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -1,5 +1,7 @@ use serde::{Deserialize, Serialize}; +use crate::cmd::command::CommandError; +use 
crate::cmd::docker::DockerError; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::logger::Logger; @@ -8,9 +10,37 @@ use std::fmt::{Display, Formatter, Result as FmtResult}; use std::path::PathBuf; use url::Url; -pub mod docker; +pub mod dockerfile_utils; pub mod local_docker; +#[derive(thiserror::Error, Debug)] +pub enum BuildError { + #[error("Cannot build Application {0} due to an invalid config: {1}")] + InvalidConfig(String, String), + + #[error("Cannot build Application {0} due to an error with git: {1}")] + GitError(String, git2::Error), + + #[error("Build of Application {0} have been aborted at user request")] + Aborted(String), + + #[error("Cannot build Application {0} due to an io error: {1} {2}")] + IoError(String, String, std::io::Error), + + #[error("Cannot build Application {0} due to an error with docker: {1}")] + DockerError(String, DockerError), + + #[error("Cannot build Application {0} due to an error with buildpack: {1}")] + BuildpackError(String, CommandError), +} + +pub fn to_engine_error(event_details: EventDetails, err: BuildError) -> EngineError { + match err { + BuildError::Aborted(_) => EngineError::new_task_cancellation_requested(event_details), + _ => EngineError::new_build_error(event_details, err), + } +} + pub trait BuildPlatform: ToTransmitter + Listen { fn context(&self) -> &Context; fn kind(&self) -> Kind; @@ -19,8 +49,7 @@ pub trait BuildPlatform: ToTransmitter + Listen { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), EngineError>; - fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), EngineError>; + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError>; fn logger(&self) -> Box; fn get_event_details(&self) -> EventDetails { let context = self.context(); @@ -94,7 +123,6 @@ impl Image { pub fn registry_host(&self) -> &str { 
self.registry_url.host_str().unwrap() } - pub fn repository_name(&self) -> &str { &self.repository_name } diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 461593fe..9577b0e9 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -178,10 +178,6 @@ impl ContainerRegistry for DOCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), ContainerRegistryError> { - Ok(()) - } - fn registry_info(&self) -> &ContainerRegistryInfo { &self.registry_info } diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 7c3273a2..2d40f473 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -70,6 +70,7 @@ impl ECR { }; cr.registry_info = Some(registry_info); + cr.is_credentials_valid()?; Ok(cr) } @@ -270,6 +271,16 @@ impl ECR { Ok(ECRCredentials::new(access_token, password, endpoint_url)) } + + fn is_credentials_valid(&self) -> Result<(), ContainerRegistryError> { + let client = StsClient::new_with_client(self.client(), Region::default()); + let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); + + match s { + Ok(_) => Ok(()), + Err(_) => Err(ContainerRegistryError::InvalidCredentials), + } + } } impl ContainerRegistry for ECR { @@ -289,16 +300,6 @@ impl ContainerRegistry for ECR { self.name.as_str() } - fn is_valid(&self) -> Result<(), ContainerRegistryError> { - let client = StsClient::new_with_client(self.client(), Region::default()); - let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); - - match s { - Ok(_) => Ok(()), - Err(_) => Err(ContainerRegistryError::InvalidCredentials), - } - } - fn registry_info(&self) -> &ContainerRegistryInfo { // At this point the registry info should be initialize, so unwrap is safe self.registry_info.as_ref().unwrap() diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 67b2be00..762b38c8 100644 --- a/src/container_registry/mod.rs +++ 
b/src/container_registry/mod.rs @@ -20,7 +20,6 @@ pub trait ContainerRegistry: Listen { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), ContainerRegistryError>; // Get info for this registry, url endpoint with login/password, image name convention, ... fn registry_info(&self) -> &ContainerRegistryInfo; @@ -57,15 +56,6 @@ pub struct ContainerRegistryInfo { pub get_repository_name: Box String>, } -pub struct PushResult { - pub image: Image, -} - -pub enum PullResult { - Some(Image), - None, -} - #[derive(Serialize, Deserialize, Clone, Copy, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index dd07b607..898fdb35 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -277,10 +277,6 @@ impl ContainerRegistry for ScalewayCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), ContainerRegistryError> { - Ok(()) - } - fn registry_info(&self) -> &ContainerRegistryInfo { &self.registry_info } diff --git a/src/engine.rs b/src/engine.rs index 770c6a29..58a11192 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -78,14 +78,6 @@ impl EngineConfig { } pub fn is_valid(&self) -> Result<(), EngineConfigError> { - if let Err(e) = self.build_platform.is_valid() { - return Err(EngineConfigError::BuildPlatformNotValid(e)); - } - - if let Err(e) = self.container_registry.is_valid() { - return Err(EngineConfigError::ContainerRegistryNotValid(e)); - } - if let Err(e) = self.cloud_provider.is_valid() { return Err(EngineConfigError::CloudProviderNotValid(e)); } diff --git a/src/errors/io.rs b/src/errors/io.rs index 1caaf02a..9fb29a88 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -75,6 +75,7 @@ pub enum Tag { UnsupportedVersion, CannotGetSupportedVersions, CannotGetCluster, + 
ContainerRegistryError, ObjectStorageCannotCreateBucket, ObjectStorageCannotPutFileIntoBucket, NoClusterFound, @@ -89,6 +90,7 @@ pub enum Tag { CloudProviderClientInvalidCredentials, VersionNumberParsingError, NotImplementedError, + BuilderError, BuilderDockerCannotFindAnyDockerfile, BuilderDockerCannotReadDockerfile, BuilderDockerCannotExtractEnvVarsFromDockerfile, @@ -220,6 +222,8 @@ impl From for Tag { errors::Tag::ObjectStorageCannotActivateBucketVersioning => { Tag::ObjectStorageCannotActivateBucketVersioning } + errors::Tag::BuilderError => Tag::BuilderError, + errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 208c55d3..5a2638c4 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -2,6 +2,7 @@ pub mod io; extern crate url; +use crate::build_platform::BuildError; use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd; use crate::cmd::docker::DockerError; @@ -260,6 +261,8 @@ pub enum Tag { NotImplementedError, /// TaskCancellationRequested: represents an error where current task cancellation has been requested. TaskCancellationRequested, + /// BuildError: represents an error when trying to build an application. + BuilderError, /// BuilderDockerCannotFindAnyDockerfile: represents an error when trying to get a Dockerfile. BuilderDockerCannotFindAnyDockerfile, /// BuilderDockerCannotReadDockerfile: represents an error while trying to read Dockerfile. @@ -284,6 +287,8 @@ pub enum Tag { DockerPushImageError, /// DockerPullImageError: represents an error when trying to pull a docker image. DockerPullImageError, + /// ContainerRegistryError: represents an error when trying to interact with a repository. + ContainerRegistryError, /// ContainerRegistryRepositoryCreationError: represents an error when trying to create a repository. 
ContainerRegistryRepositoryCreationError, /// ContainerRegistryRepositorySetLifecycleError: represents an error when trying to set repository lifecycle policy. @@ -1735,7 +1740,25 @@ impl EngineError { pub fn new_container_registry_error(event_details: EventDetails, error: ContainerRegistryError) -> EngineError { EngineError::new( event_details, - Tag::HelmChartUninstallError, + Tag::ContainerRegistryError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + + /// Creates new error from an Build error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message. + pub fn new_build_error(event_details: EventDetails, error: BuildError) -> EngineError { + EngineError::new( + event_details, + Tag::BuilderError, error.to_string(), error.to_string(), None, diff --git a/src/transaction.rs b/src/transaction.rs index 4a9f04d8..eb85ed60 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,10 +1,11 @@ +use crate::build_platform::BuildError; use crate::cloud_provider::environment::Environment; use std::cell::RefCell; use std::rc::Rc; use std::thread; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::{Action, Service}; +use crate::cloud_provider::service::{Action, Application, Service}; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::to_engine_error; use crate::engine::{EngineConfig, EngineConfigError}; @@ -118,12 +119,11 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - environment: &mut Environment, + applications: &mut [Box], option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications - let mut apps_to_build = environment - .applications + let mut apps_to_build = applications .iter_mut() // build only applications that are set with Action: Create .filter(|app| *app.action() == Action::Create) @@ -146,6 +146,16 @@ impl<'a> Transaction<'a> { 
to_engine_error(event_details, err) }; + let build_event_details = || -> EventDetails { + self.get_event_details( + Stage::Environment(EnvironmentStep::Build), + Transmitter::BuildPlatform( + self.engine.build_platform().id().to_string(), + self.engine.build_platform().name().to_string(), + ), + ) + }; + // Do setup of registry and be sure we are login to the registry let cr_registry = self.engine.container_registry(); let _ = cr_registry.create_registry().map_err(cr_to_engine_error)?; @@ -164,10 +174,43 @@ impl<'a> Transaction<'a> { .map_err(cr_to_engine_error)?; // Ok now everything is setup, we can try to build the app - let _ = self + let build_result = self .engine .build_platform() - .build(app.get_build_mut(), &self.is_transaction_aborted)?; + .build(app.get_build_mut(), &self.is_transaction_aborted); + + // logging + let image_name = app.get_build().image.full_image_name_with_tag(); + let msg = match &build_result { + Ok(_) => format!("✅ Container {} is built", &image_name), + Err(BuildError::Aborted(_)) => format!("🚫 Container {} build has been canceled", &image_name), + Err(err) => format!("❌ Container {} failed to be build: {}", &image_name, err), + }; + + let progress_info = ProgressInfo::new( + ProgressScope::Application { + id: app.id().to_string(), + }, + match build_result.is_ok() { + true => ProgressLevel::Info, + false => ProgressLevel::Error, + }, + Some(msg.to_string()), + self.engine.context().execution_id(), + ); + ListenersHelper::new(self.engine.build_platform().listeners()).deployment_in_progress(progress_info); + + let event_details = build_event_details(); + self.logger.log( + match build_result.is_ok() { + true => LogLevel::Info, + false => LogLevel::Error, + }, + EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg)), + ); + + // Abort if it was an error + let _ = build_result.map_err(|err| crate::build_platform::to_engine_error(event_details, err))?; } Ok(()) @@ -274,7 +317,8 @@ impl<'a> Transaction<'a> { } // build 
applications - match self.build_and_push_applications(&mut (environment.as_ref().borrow_mut()), &option) { + let applications = &mut (environment.as_ref().borrow_mut()).applications; + match self.build_and_push_applications(applications, &option) { Ok(apps) => apps, Err(engine_err) => { self.logger.log( From 619a1e8bb8f80953ac708e34a54327116881697a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 24 Mar 2022 14:21:01 +0100 Subject: [PATCH 49/85] Correctly propagate new image tag (#660) --- src/build_platform/dockerfile_utils.rs | 95 ++++++++++++-------------- src/build_platform/local_docker.rs | 71 ++++++++++--------- src/build_platform/mod.rs | 17 ++++- src/models.rs | 45 ++++++------ src/utilities.rs | 29 ++++---- test_utilities/src/scaleway.rs | 12 ++-- 6 files changed, 135 insertions(+), 134 deletions(-) diff --git a/src/build_platform/dockerfile_utils.rs b/src/build_platform/dockerfile_utils.rs index 621ea33f..2873b4b3 100644 --- a/src/build_platform/dockerfile_utils.rs +++ b/src/build_platform/dockerfile_utils.rs @@ -32,25 +32,11 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec) -> Result( - env_var_args: &'a [(&'a str, &'a str)], - dockerfile_content: Vec, -) -> Result, Utf8Error> { - // extract env vars used in the Dockerfile - let used_args = extract_dockerfile_args(dockerfile_content)?; - - let mut matched_env_args = env_var_args.to_vec(); - matched_env_args.retain(|(k, _v)| used_args.contains(*k)); - - Ok(matched_env_args) -} - #[cfg(test)] mod tests { use super::*; + use maplit::btreemap; + use std::collections::BTreeMap; #[test] fn test_extract_dockerfile_args() { @@ -99,28 +85,29 @@ mod tests { let res = extract_dockerfile_args(dockerfile.to_vec()); assert_eq!(res.unwrap().len(), 4); - let env_var_args_to_match = vec![ - ("foo", "abcdvalue"), - ("bar", "abcdvalue"), - ("toto", "abcdvalue"), - ("x", "abcdvalue"), + let args = btreemap![ + "foo" => "abcdvalue", + "bar" => "abcdvalue", + "toto" => 
"abcdvalue", + "x" => "abcdvalue", ]; - let matched_vars = match_used_env_var_args(&env_var_args_to_match, dockerfile.to_vec()); + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret, args); - assert_eq!(matched_vars.clone().unwrap(), env_var_args_to_match.clone()); + let args = btreemap!["toto" => "abcdvalue", "x" => "abcdvalue"]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 2); - assert_eq!(matched_vars.unwrap().len(), 4); - - let args = vec![("toto", "abcdvalue"), ("x", "abcdvalue")]; - let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 2); - - let args = vec![]; - let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let args: BTreeMap<&str, &str> = btreemap![]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); let dockerfile = b" FROM node @@ -129,9 +116,10 @@ mod tests { RUN ls -lh "; - let matched_vars = match_used_env_var_args(&env_var_args_to_match, dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); } #[test] @@ -165,23 +153,26 @@ mod tests { let res = extract_dockerfile_args(dockerfile.to_vec()); assert_eq!(res.unwrap().len(), 3); - let args = vec![ - ("PRISMIC_REPO_NAME", "abcdvalue"), - ("PRISMIC_API_KEY", "abcdvalue"), - ("PRISMIC_CUSTOM_TYPES_API_TOKEN", "abcdvalue"), + let args = btreemap![ + "PRISMIC_REPO_NAME" => "abcdvalue", + "PRISMIC_API_KEY" 
=> "abcdvalue", + "PRISMIC_CUSTOM_TYPES_API_TOKEN" => "abcdvalue", ]; - let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 3); - assert_eq!(matched_vars.unwrap().len(), 3); + let args = btreemap!["PRISMIC_REPO_NAME" => "abcdvalue"]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 1); - let args = vec![("PRISMIC_REPO_NAME", "abcdvalue")]; - let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 1); - - let args = vec![]; - let matched_vars = match_used_env_var_args(&args, dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let args: BTreeMap<&str, &str> = btreemap![]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); } } diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 43f5a991..b269c059 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,7 +6,8 @@ use std::{env, fs}; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; -use crate::build_platform::{dockerfile_utils, Build, BuildError, BuildPlatform, Credentials, Kind}; +use crate::build_platform::dockerfile_utils::extract_dockerfile_args; +use crate::build_platform::{Build, BuildError, BuildPlatform, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; @@ -101,13 +102,35 @@ impl LocalDocker { fn build_image_with_docker( &self, - build: &Build, + build: &mut Build, 
dockerfile_complete_path: &str, into_dir_docker_style: &str, - env_var_args: &[(&str, &str)], lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, ) -> Result<(), BuildError> { + // Going to inject only env var that are used by the dockerfile + // so extracting it and modifying the image tag and env variables + let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { + BuildError::IoError( + build.image.application_id.clone(), + "reading dockerfile content".to_string(), + err, + ) + })?; + let dockerfile_args = match extract_dockerfile_args(dockerfile_content) { + Ok(dockerfile_args) => dockerfile_args, + Err(err) => { + let msg = format!("Cannot extract env vars from your dockerfile {}", err); + return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); + } + }; + + // Keep only the env variables we want for our build + // and force re-compute the image tag + build.environment_variables.retain(|k, _| dockerfile_args.contains(k)); + build.compute_image_tag(); + + // Prepare image we want to build let image_to_build = ContainerImage { registry: build.image.registry_url.clone(), name: build.image.name(), @@ -120,26 +143,17 @@ impl LocalDocker { tags: vec!["latest".to_string()], }; - let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { - BuildError::IoError( - build.image.application_id.clone(), - "reading dockerfile content".to_string(), - err, - ) - })?; - let env_var_args = match dockerfile_utils::match_used_env_var_args(env_var_args, dockerfile_content) { - Ok(env_var_args) => env_var_args, - Err(err) => { - let msg = format!("Cannot extract env vars from your dockerfile {}", err); - return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); - } - }; + let env_vars: Vec<(&str, &str)> = build + .environment_variables + .iter() + .map(|(k, v)| (k.as_str(), v.as_str())) + .collect(); let exit_status = self.context.docker.build( Path::new(dockerfile_complete_path), 
Path::new(into_dir_docker_style), &image_to_build, - &env_var_args, + &env_vars, &image_cache, true, &mut |line| { @@ -186,7 +200,6 @@ impl LocalDocker { &self, build: &Build, into_dir_docker_style: &str, - env_var_args: &[(&str, &str)], use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, @@ -209,8 +222,8 @@ impl LocalDocker { buildpacks_args.extend(vec!["-t", name_with_latest_tag.as_str()]); buildpacks_args.extend(vec!["--path", into_dir_docker_style]); - let mut args_buffer = Vec::with_capacity(env_var_args.len()); - for (key, value) in env_var_args { + let mut args_buffer = Vec::with_capacity(build.environment_variables.len()); + for (key, value) in &build.environment_variables { args_buffer.push("--env".to_string()); args_buffer.push(format!("{}={}", key, value)); } @@ -409,18 +422,6 @@ impl BuildPlatform for LocalDocker { return Err(BuildError::Aborted(build.image.application_id.clone())); } - let mut disable_build_cache = false; - let mut env_var_args: Vec<(&str, &str)> = Vec::with_capacity(build.options.environment_variables.len()); - for ev in &build.options.environment_variables { - if ev.key == "QOVERY_DISABLE_BUILD_CACHE" && ev.value.to_lowercase() == "true" { - // this is a special flag to disable build cache dynamically - // -- do not pass this env var key/value to as build parameter - disable_build_cache = true; - } else { - env_var_args.push((&ev.key, &ev.value)); - } - } - // ensure docker_path is a mounted volume, otherwise ignore because it's not what Qovery does in production // ex: this cause regular cleanup on CI, leading to random tests errors self.reclaim_space_if_needed(); @@ -470,7 +471,6 @@ impl BuildPlatform for LocalDocker { build, dockerfile_absolute_path.to_str().unwrap_or_default(), build_context_path.to_str().unwrap_or_default(), - &env_var_args, &listeners_helper, is_task_canceled, ) @@ -479,8 +479,7 @@ impl BuildPlatform for LocalDocker { self.build_image_with_buildpacks( build, 
build_context_path.to_str().unwrap_or_default(), - &env_var_args, - !disable_build_cache, + !build.disable_cache, &listeners_helper, is_task_canceled, ) diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 4518e163..be1ce938 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use crate::cmd::command::CommandError; use crate::cmd::docker::DockerError; @@ -6,7 +7,9 @@ use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::logger::Logger; use crate::models::{Context, Listen, QoveryIdentifier}; +use crate::utilities::compute_image_tag; use std::fmt::{Display, Formatter, Result as FmtResult}; +use std::hash::Hash; use std::path::PathBuf; use url::Url; @@ -68,11 +71,19 @@ pub trait BuildPlatform: ToTransmitter + Listen { pub struct Build { pub git_repository: GitRepository, pub image: Image, - pub options: BuildOptions, + pub environment_variables: BTreeMap, + pub disable_cache: bool, } -pub struct BuildOptions { - pub environment_variables: Vec, +impl Build { + pub fn compute_image_tag(&mut self) { + self.image.tag = compute_image_tag( + &self.git_repository.root_path, + &self.git_repository.dockerfile_path, + &self.environment_variables, + &self.git_repository.commit_id, + ); + } } #[derive(Clone, Eq, PartialEq, Hash, Debug)] diff --git a/src/models.rs b/src/models.rs index 80b31ce7..aa5fa07a 100644 --- a/src/models.rs +++ b/src/models.rs @@ -13,7 +13,7 @@ use rand::Rng; use serde::{Deserialize, Serialize}; use url::Url; -use crate::build_platform::{Build, BuildOptions, Credentials, GitRepository, Image, SshKey}; +use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::aws::application::ApplicationAws; use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; use crate::cloud_provider::aws::databases::mysql::MySQLAws; @@ -40,7 
+40,6 @@ use crate::cloud_provider::Kind as CPKind; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; -use crate::utilities::get_image_tag; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -285,16 +284,11 @@ impl Application { } } - pub fn to_image(&self, cr_info: &ContainerRegistryInfo) -> Image { + fn to_image(&self, cr_info: &ContainerRegistryInfo) -> Image { Image { application_id: self.id.clone(), name: (cr_info.get_image_name)(&self.name), - tag: get_image_tag( - &self.root_path, - &self.dockerfile_path, - &self.environment_vars, - &self.commit_id, - ), + tag: "".to_string(), // It needs to be compute after creation commit_id: self.commit_id.clone(), registry_name: cr_info.registry_name.clone(), registry_url: cr_info.endpoint.clone(), @@ -366,7 +360,8 @@ impl Application { //FIXME: Return a result the function let url = Url::parse(&self.git_url).unwrap_or_else(|_| Url::parse("https://invalid-git-url.com").unwrap()); - Build { + let mut disable_build_cache = false; + let mut build = Build { git_repository: GitRepository { url, credentials: self.git_credentials.as_ref().map(|credentials| Credentials { @@ -380,17 +375,25 @@ impl Application { buildpack_language: self.buildpack_language.clone(), }, image: self.to_image(registry_url), - options: BuildOptions { - environment_variables: self - .environment_vars - .iter() - .map(|(k, v)| crate::build_platform::EnvironmentVariable { - key: k.clone(), - value: String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or_default()).into_owned(), - }) - .collect::>(), - }, - } + environment_variables: self + .environment_vars + .iter() + .filter_map(|(k, v)| { + // Remove special vars + let v = String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or_default()).into_owned(); + if k == "QOVERY_DISABLE_BUILD_CACHE" && v.to_lowercase() == "true" { + disable_build_cache = true; + return None; + } + + Some((k.clone(), v)) + }) + 
.collect::>(), + disable_cache: disable_build_cache, + }; + + build.compute_image_tag(); + build } } diff --git a/src/utilities.rs b/src/utilities.rs index ef46e10a..c4b002bd 100644 --- a/src/utilities.rs +++ b/src/utilities.rs @@ -1,6 +1,7 @@ use std::collections::hash_map::DefaultHasher; use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; +use std::path::Path; use reqwest::header; use reqwest::header::{HeaderMap, HeaderValue}; @@ -19,11 +20,11 @@ pub fn calculate_hash(t: &T) -> u64 { s.finish() } -pub fn get_image_tag( - root_path: &String, - dockerfile_path: &Option, +pub fn compute_image_tag + Hash, T: AsRef + Hash>( + root_path: P, + dockerfile_path: &Option, environment_variables: &BTreeMap, - commit_id: &String, + commit_id: &str, ) -> String { // Image tag == hash(root_path) + commit_id truncate to 127 char // https://github.com/distribution/distribution/blob/6affafd1f030087d88f88841bf66a8abe2bf4d24/reference/regexp.go#L41 @@ -38,8 +39,6 @@ pub fn get_image_tag( // only use when a Dockerfile is used to prevent build cache miss every single time // we redeploy an app with a env var changed with Buildpacks. 
dockerfile_path.hash(&mut hasher); - - // TODO check if the environment variables are used in the Dockerfile and only Hash the one that are used environment_variables.hash(&mut hasher); } @@ -51,19 +50,19 @@ pub fn get_image_tag( #[cfg(test)] mod tests_utilities { - use crate::utilities::get_image_tag; + use crate::utilities::compute_image_tag; use std::collections::BTreeMap; #[test] fn test_get_image_tag() { - let image_tag = get_image_tag( + let image_tag = compute_image_tag( &"/".to_string(), &Some("Dockerfile".to_string()), &BTreeMap::new(), &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), ); - let image_tag_2 = get_image_tag( + let image_tag_2 = compute_image_tag( &"/".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), @@ -72,7 +71,7 @@ mod tests_utilities { assert_ne!(image_tag, image_tag_2); - let image_tag_3 = get_image_tag( + let image_tag_3 = compute_image_tag( &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), @@ -81,7 +80,7 @@ mod tests_utilities { assert_ne!(image_tag, image_tag_3); - let image_tag_3_2 = get_image_tag( + let image_tag_3_2 = compute_image_tag( &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), @@ -90,9 +89,9 @@ mod tests_utilities { assert_eq!(image_tag_3, image_tag_3_2); - let image_tag_4 = get_image_tag( + let image_tag_4 = compute_image_tag( &"/".to_string(), - &None, + &None as &Option<&str>, &BTreeMap::new(), &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), ); @@ -100,9 +99,9 @@ mod tests_utilities { let mut env_vars_5 = BTreeMap::new(); env_vars_5.insert("toto".to_string(), "key".to_string()); - let image_tag_5 = get_image_tag( + let image_tag_5 = compute_image_tag( &"/".to_string(), - &None, + &None as &Option<&str>, &env_vars_5, &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), ); diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index b9159a03..1941a90b 100644 --- a/test_utilities/src/scaleway.rs +++ 
b/test_utilities/src/scaleway.rs @@ -1,5 +1,5 @@ use const_format::formatcp; -use qovery_engine::build_platform::Image; +use qovery_engine::build_platform::Build; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; use qovery_engine::cloud_provider::scaleway::Scaleway; @@ -241,15 +241,13 @@ pub fn clean_environments( // delete images created in registry let registry_url = container_registry_client.registry_info(); for env in environments.iter() { - for image in env + for build in env .applications .iter() - .map(|a| a.to_image(®istry_url)) - .collect::>() + .map(|a| a.to_build(®istry_url)) + .collect::>() { - if let Err(e) = container_registry_client.delete_image(&image) { - return Err(e); - } + let _ = container_registry_client.delete_image(&build.image); } } From 31388f7367c27c9b6492def6199864e59147db7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 24 Mar 2022 15:24:22 +0100 Subject: [PATCH 50/85] Skip build if image already exist remotly (#661) --- src/build_platform/local_docker.rs | 65 ++++++++++++++++-------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index b269c059..f52b7868 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -108,6 +108,24 @@ impl LocalDocker { lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, ) -> Result<(), BuildError> { + // logger + let log_info = { + let app_id = build.image.application_id.clone(); + move |msg: String| { + self.logger.log( + LogLevel::Info, + EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(msg.clone())), + ); + + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { id: app_id.clone() }, + ProgressLevel::Info, + Some(msg), + self.context.execution_id(), + )); + } + }; + // Going to inject only env var 
that are used by the dockerfile // so extracting it and modifying the image tag and env variables let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { @@ -143,6 +161,21 @@ impl LocalDocker { tags: vec!["latest".to_string()], }; + // Check if the image does not exist already remotly, if yes, we skip the build + let image_name = image_to_build.image_name(); + log_info(format!("Checking if image {} already exist remotely", image_name)); + if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { + log_info(format!( + "Image {} already exist in the registry, skipping build", + image_name + )); + + // skip build + return Ok(()); + } + + log_info(format!("Image {} does not exist remotely. Building it", image_name)); + // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build .environment_variables .iter() @@ -156,36 +189,8 @@ impl LocalDocker { &env_vars, &image_cache, true, - &mut |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); - - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Info, - Some(line), - self.context.execution_id(), - )); - }, - &mut |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); - - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Info, - Some(line), - self.context.execution_id(), - )); - }, + &mut |line| log_info(line), + &mut |line| log_info(line), &CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled), ); From 1732f9916c4743241119f5a6a8528ac6e80ace86 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 24 Mar 2022 16:09:52 +0100 Subject: [PATCH 51/85] Add emoji in logs --- 
src/build_platform/local_docker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index f52b7868..c110655d 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -163,10 +163,10 @@ impl LocalDocker { // Check if the image does not exist already remotly, if yes, we skip the build let image_name = image_to_build.image_name(); - log_info(format!("Checking if image {} already exist remotely", image_name)); + log_info(format!("🕵️ Checking if image {} already exist remotely", image_name)); if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { log_info(format!( - "Image {} already exist in the registry, skipping build", + "🎯 Image {} already exist in the registry, skipping build", image_name )); @@ -174,7 +174,7 @@ impl LocalDocker { return Ok(()); } - log_info(format!("Image {} does not exist remotely. Building it", image_name)); + log_info(format!("⌛ Image {} does not exist remotely. 
Building it", image_name)); // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build .environment_variables From b7d6b578ac284899d5735445ad348244d600110d Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 24 Mar 2022 16:13:06 +0100 Subject: [PATCH 52/85] Add emoji in logs --- src/build_platform/local_docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index c110655d..c419bb57 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -368,7 +368,7 @@ impl BuildPlatform for LocalDocker { // LOGGING let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); let msg = format!( - "Cloning repository: {} to {:?}", + "💾 Cloning repository: {} to {:?}", build.git_repository.url, repository_root_path ); listeners_helper.deployment_in_progress(ProgressInfo::new( From a38e0d0207af5216a5af17b3226d1e2edf35b630 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 24 Mar 2022 16:16:37 +0100 Subject: [PATCH 53/85] Add emoji in logs --- src/build_platform/local_docker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index c419bb57..8082fa92 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -174,7 +174,7 @@ impl LocalDocker { return Ok(()); } - log_info(format!("⌛ Image {} does not exist remotely. Building it", image_name)); + log_info(format!("⛏️ Image {} does not exist remotely. 
Building it", image_name)); // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build .environment_variables @@ -368,7 +368,7 @@ impl BuildPlatform for LocalDocker { // LOGGING let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); let msg = format!( - "💾 Cloning repository: {} to {:?}", + "📥 Cloning repository: {} to {:?}", build.git_repository.url, repository_root_path ); listeners_helper.deployment_in_progress(ProgressInfo::new( From 292777540bf98da53b1812056b9284a403b9699f Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 24 Mar 2022 17:27:08 +0100 Subject: [PATCH 54/85] Change wording --- src/build_platform/local_docker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 8082fa92..bc93ddac 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -163,10 +163,10 @@ impl LocalDocker { // Check if the image does not exist already remotly, if yes, we skip the build let image_name = image_to_build.image_name(); - log_info(format!("🕵️ Checking if image {} already exist remotely", image_name)); + log_info(format!("🕵️ Checking if image already exist remotely {}", image_name)); if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { log_info(format!( - "🎯 Image {} already exist in the registry, skipping build", + "🎯 Skipping build. Image already exist in the registry {}", image_name )); @@ -174,7 +174,7 @@ impl LocalDocker { return Ok(()); } - log_info(format!("⛏️ Image {} does not exist remotely. Building it", image_name)); + log_info(format!("⛏️ Building image. 
It does not exist remotely {}", image_name)); // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build .environment_variables From df1e5cb6cbe056fa4fd089a662f005560987beba Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Thu, 24 Mar 2022 17:27:56 +0100 Subject: [PATCH 55/85] fix: DO kubeconfig retrieval (#656) --- .../bootstrap/doks-gen-kubectl-config.j2.tf | 2 +- .../digitalocean/application.rs | 2 +- .../digitalocean/kubernetes/doks_api.rs | 11 ++-- .../digitalocean/kubernetes/mod.rs | 48 ++++++++--------- test_utilities/src/utilities.rs | 53 +++++-------------- tests/digitalocean/do_environment.rs | 13 +++-- 6 files changed, 55 insertions(+), 74 deletions(-) diff --git a/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf b/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf index 4d9ad324..414e32e5 100644 --- a/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf +++ b/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf @@ -7,5 +7,5 @@ KUBECONFIG resource "local_file" "kubeconfig" { filename = "${var.space_bucket_kubeconfig}/${var.kubeconfig_filename}" content = local.kubeconfig - file_permission = "0644" + file_permission = "0600" } \ No newline at end of file diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index 13763ab7..9beec675 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -247,7 +247,7 @@ impl Service for ApplicationDo { // This is specific to digital ocean as it is them that create the registry secret // we don't have the hand on it - context.insert("registry_secret", &self.build.image.registry_name); + context.insert("registry_secret", "do-container-registry-secret-for-cluster"); let storage = self .storage diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 28319e1d..6c21eee4 100644 --- 
a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -92,17 +92,22 @@ pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Res }; let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters.clone(); + let cluster_name = cluster_name.trim().to_lowercase(); match clusters_copy .into_iter() - .filter(|cluster| cluster.name == cluster_name.to_string()) + .filter(|cluster| cluster.name.trim().to_lowercase() == cluster_name.to_string()) .collect::>() .first() - .clone() { Some(cluster) => { let kubeconfig_url = format!("{}/clusters/{}/kubeconfig", DoApiType::Doks.api_url(), cluster.id); match do_get_from_api(token, DoApiType::Doks, kubeconfig_url) { - Ok(kubeconfig) => Ok(Some(kubeconfig)), + Ok(kubeconfig) => { + if kubeconfig.is_empty() { + return Ok(None); + } + Ok(Some(kubeconfig)) + } Err(e) => Err(CommandError::new_from_safe_message(e.message())), } } diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index c836e18a..3702e2d2 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -1739,18 +1739,27 @@ impl Kubernetes for DOKS { }, None => { let kubeconfig = match get_do_kubeconfig_by_cluster_name(self.cloud_provider.token(), self.name()) { - Ok(kubeconfig) => Ok(kubeconfig), - Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), - CommandError::new(e.message(), Some(e.message())), - )), - } - .expect("Unable to get kubeconfig"); + Ok(kubeconfig) => match kubeconfig { + None => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new_from_safe_message("Kubeconfig is empty".to_string()), + )) + } + Some(content) => content, + }, + Err(e) => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + 
event_details.clone(), + CommandError::new(e.message(), Some(e.message())), + )) + } + }; let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), - format!("object-storage/scaleway_os/{}", self.name()), + format!("object-storage/spaces/{}", self.name()), ) .map_err(|err| { EngineError::new_cannot_retrieve_cluster_config_file( @@ -1777,24 +1786,15 @@ impl Kubernetes for DOKS { .truncate(true) .open(path), ) { - Ok(mut created_file) => match kubeconfig.is_some() { - false => Err(EngineError::new_cannot_create_file( + Ok(mut created_file) => match block_on(created_file.write_all(kubeconfig.as_bytes())) { + Ok(_) => { + let file = File::open(path).unwrap(); + Ok((file_path, file)) + } + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( event_details.clone(), - CommandError::new( - "No kubeconfig found".to_string(), - Some("No kubeconfig found".to_string()), - ), + CommandError::new(e.to_string(), Some(e.to_string())), )), - true => match block_on(created_file.write_all(kubeconfig.unwrap().as_bytes())) { - Ok(_) => { - let file = File::open(path).unwrap(); - Ok((file_path, file)) - } - Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), - )), - }, }, Err(e) => Err(EngineError::new_cannot_create_file( event_details.clone(), diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 5d72e959..b6b36685 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -21,7 +21,6 @@ use retry::delay::Fibonacci; use retry::OperationResult; use std::env; use std::fs; -use tokio::io::AsyncWriteExt; use tracing::{info, warn}; use crate::scaleway::{ @@ -540,47 +539,19 @@ where secrets.clone().DIGITAL_OCEAN_TOKEN.unwrap().as_str(), cluster_name.clone().as_str(), ) { - Ok(kubeconfig) => Ok(kubeconfig), - Err(e) => Err(CommandError::new(e.message(), 
Some(e.message()))), - } - .expect("Unable to get kubeconfig"); + Ok(kubeconfig) => kubeconfig, + Err(e) => return OperationResult::Retry(CommandError::new(e.message(), Some(e.message()))), + }; - let workspace_directory = qovery_engine::fs::workspace_directory( - context.workspace_root_dir(), - context.execution_id(), - format!("object-storage/scaleway_os/{}", cluster_name.clone()), - ) - .map_err(|err| CommandError::new(err.to_string(), Some(err.to_string()))) - .expect("Unable to create directory"); - - let file_path = format!( - "{}/{}/{}", - workspace_directory, - format!("qovery-kubeconfigs-{}", context.cluster_id()), - format!("{}.yaml", context.cluster_id()) - ); - let path = Path::new(file_path.as_str()); - let parent_dir = path.parent().unwrap(); - let _ = block_on(tokio::fs::create_dir_all(parent_dir)); - - match block_on( - tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(path), - ) { - Ok(mut created_file) => match kubeconfig.is_some() { - false => Err(CommandError::new( - "No kubeconfig found".to_string(), - Some("No kubeconfig found".to_string()), - )), - true => match block_on(created_file.write_all(kubeconfig.unwrap().as_bytes())) { - Ok(_) => Ok(file_path), - Err(e) => Err(CommandError::new(e.to_string(), Some(e.to_string()))), - }, - }, - Err(e) => Err(CommandError::new(e.to_string(), Some(e.to_string()))), + match kubeconfig { + None => Err(CommandError::new( + "No kubeconfig found".to_string(), + Some("No kubeconfig found".to_string()), + )), + Some(file_content) => { + let _ = "test"; + Ok(file_content) + } } } Kind::Scw => { diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index d53888e4..d253bf3b 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -119,11 +119,16 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let env_action = environment.clone(); let env_action_for_delete = 
environment_for_delete.clone(); - let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); - assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); - assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); + + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); + assert!(matches!( + result, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); From 3ea5e3c3a67826cf1f4d154e66ba2bf1ee2f1877 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 24 Mar 2022 18:14:17 +0100 Subject: [PATCH 56/85] Add missing feature --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 533819ec..4c46c5cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,3 +110,4 @@ test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-ho # functionnal test with only a k8s cluster as a dependency test-with-kube = [] +test-with-docker = [] From 262efda972545cd01e9472d9216b61e3c1903b4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 24 Mar 2022 21:38:55 +0100 Subject: [PATCH 57/85] Create Dockerfile --- tests/docker/multi_stage_simple/Dockerfile | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 tests/docker/multi_stage_simple/Dockerfile diff --git a/tests/docker/multi_stage_simple/Dockerfile b/tests/docker/multi_stage_simple/Dockerfile new file mode 100644 index 00000000..10d224cb --- /dev/null +++ 
b/tests/docker/multi_stage_simple/Dockerfile @@ -0,0 +1,10 @@ + +FROM golang:1.16 AS build + +COPY hello.go /go/src/project/hello.go +WORKDIR /go/src/project +RUN go build hello.go + +FROM scratch +COPY --from=build /go/src/project/hello /bin/hello +ENTRYPOINT ["/bin/hello"] From 804fb1b9902cd111fb15cd15d4beed814826444b Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 25 Mar 2022 01:07:41 +0100 Subject: [PATCH 58/85] fix: DO kubeconfig retrieval cluster name (#662) --- src/cloud_provider/digitalocean/kubernetes/doks_api.rs | 2 +- src/cloud_provider/digitalocean/kubernetes/mod.rs | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 6c21eee4..107bab2b 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -95,7 +95,7 @@ pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Res let cluster_name = cluster_name.trim().to_lowercase(); match clusters_copy .into_iter() - .filter(|cluster| cluster.name.trim().to_lowercase() == cluster_name.to_string()) + .filter(|cluster| cluster.name.trim().to_lowercase() == cluster_name) .collect::>() .first() { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 3702e2d2..f9416411 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -187,6 +187,11 @@ impl DOKS { format!("{}.yaml", self.id) } + // TODO(benjaminch): Very dirty quickfix, should be removed and cluster id / name should be handled globally + fn doks_cluster_name(&self) -> String { + format!("qovery-{}", self.id) + } + // create a context to render tf files (terraform) contained in lib/digitalocean/ fn tera_context(&self) -> Result { let event_details = 
self.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); @@ -1738,7 +1743,10 @@ impl Kubernetes for DOKS { )), }, None => { - let kubeconfig = match get_do_kubeconfig_by_cluster_name(self.cloud_provider.token(), self.name()) { + let kubeconfig = match get_do_kubeconfig_by_cluster_name( + self.cloud_provider.token(), + self.doks_cluster_name().as_str(), + ) { Ok(kubeconfig) => match kubeconfig { None => { return Err(EngineError::new_cannot_retrieve_cluster_config_file( From 4653964798a65cc6841dd32e79d42e012e8a4774 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 10:21:34 +0100 Subject: [PATCH 59/85] Linter (#663) --- src/cloud_provider/aws/application.rs | 8 +- src/cloud_provider/aws/databases/mongodb.rs | 6 +- src/cloud_provider/aws/databases/mysql.rs | 4 +- .../aws/databases/postgresql.rs | 4 +- src/cloud_provider/aws/databases/redis.rs | 6 +- .../aws/kubernetes/helm_charts.rs | 10 +- src/cloud_provider/aws/kubernetes/mod.rs | 165 ++++++++--------- src/cloud_provider/aws/kubernetes/node.rs | 2 +- src/cloud_provider/aws/mod.rs | 8 +- src/cloud_provider/aws/regions.rs | 6 +- src/cloud_provider/aws/router.rs | 16 +- .../digitalocean/application.rs | 10 +- .../digitalocean/databases/mongodb.rs | 6 +- .../digitalocean/databases/mysql.rs | 4 +- .../digitalocean/databases/postgresql.rs | 4 +- .../digitalocean/databases/redis.rs | 2 +- .../digitalocean/do_api_common.rs | 8 +- .../digitalocean/kubernetes/cidr.rs | 4 +- .../digitalocean/kubernetes/doks_api.rs | 6 +- .../digitalocean/kubernetes/helm_charts.rs | 9 +- .../digitalocean/kubernetes/mod.rs | 161 ++++++++-------- .../digitalocean/kubernetes/node.rs | 2 +- src/cloud_provider/digitalocean/mod.rs | 8 +- .../digitalocean/network/load_balancer.rs | 2 +- .../digitalocean/network/vpc.rs | 4 +- src/cloud_provider/digitalocean/router.rs | 29 ++- src/cloud_provider/helm.rs | 42 ++--- src/cloud_provider/kubernetes.rs | 107 +++++------ 
src/cloud_provider/qovery.rs | 8 +- src/cloud_provider/scaleway/application.rs | 8 +- .../scaleway/databases/mongodb.rs | 4 +- .../scaleway/databases/mysql.rs | 4 +- .../scaleway/databases/postgresql.rs | 4 +- .../scaleway/databases/redis.rs | 4 +- .../scaleway/kubernetes/helm_charts.rs | 8 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 173 +++++++++--------- .../scaleway/kubernetes/node.rs | 2 +- src/cloud_provider/scaleway/router.rs | 12 +- src/cloud_provider/service.rs | 109 +++++------ src/cloud_provider/utilities.rs | 22 +-- src/cmd/command.rs | 10 +- src/cmd/docker.rs | 46 ++--- src/cmd/helm.rs | 93 +++++----- src/cmd/kubectl.rs | 20 +- src/cmd/terraform.rs | 6 +- src/container_registry/docr.rs | 27 ++- src/container_registry/ecr.rs | 4 +- .../scaleway_container_registry.rs | 20 +- src/errors/io.rs | 5 +- src/errors/mod.rs | 156 +++++++--------- src/events/io.rs | 10 +- src/events/mod.rs | 8 +- src/fs.rs | 4 +- src/git.rs | 6 +- src/logger.rs | 14 +- src/object_storage/s3.rs | 12 +- src/object_storage/scaleway_object_storage.rs | 32 ++-- src/object_storage/spaces.rs | 2 +- src/transaction.rs | 8 +- src/unit_conversion.rs | 2 +- src/utilities.rs | 12 +- tests/aws/aws_databases.rs | 24 +-- tests/aws/aws_environment.rs | 42 ++--- tests/aws/aws_s3.rs | 48 ++--- tests/aws/aws_whole_enchilada.rs | 6 +- tests/digitalocean/do_databases.rs | 14 +- tests/digitalocean/do_environment.rs | 26 +-- tests/digitalocean/do_spaces.rs | 24 +-- tests/digitalocean/do_whole_enchilada.rs | 2 +- tests/scaleway/scw_databases.rs | 12 +- tests/scaleway/scw_environment.rs | 35 ++-- tests/scaleway/scw_whole_enchilada.rs | 2 +- 72 files changed, 785 insertions(+), 948 deletions(-) diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs index 47ff01dd..aa37c36e 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/cloud_provider/aws/application.rs @@ -223,7 +223,7 @@ impl Service for ApplicationAws { let cpu_limits = match 
validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), + self.context.execution_id(), &self.id, self.total_cpus(), self.cpu_burst(), @@ -233,7 +233,7 @@ impl Service for ApplicationAws { Ok(l) => l, Err(e) => { return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), + event_details, self.total_cpus(), self.cpu_burst(), e, @@ -297,7 +297,7 @@ impl Create for ApplicationAws { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { @@ -317,7 +317,7 @@ impl Create for ApplicationAws { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 58e732c7..4fc2b6d4 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -123,7 +123,7 @@ impl Service for MongoDbAws { // https://docs.aws.amazon.com/documentdb/latest/developerguide/limits.html#limits-naming_constraints let prefix = "mongodb"; let max_size = 60 - prefix.len(); // 63 (max DocumentDB) - 3 (k8s statefulset chars) - let mut new_name = format!("{}{}", prefix, self.name().replace("_", "").replace("-", "")); + let mut new_name = format!("{}{}", prefix, self.name().replace('_', "").replace('-', "")); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); } @@ -183,7 +183,7 @@ impl Service for MongoDbAws { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -191,7 +191,7 @@ impl Service for MongoDbAws { 
context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? .matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs index f1569d0d..2fdf6bd6 100644 --- a/src/cloud_provider/aws/databases/mysql.rs +++ b/src/cloud_provider/aws/databases/mysql.rs @@ -188,7 +188,7 @@ impl Service for MySQLAws { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -204,7 +204,7 @@ impl Service for MySQLAws { Ok(v) => v, Err(e) => { return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details.clone(), + event_details, "MySQL".to_string(), "parameter_group_family".to_string(), version.matched_version().to_string(), diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs index 754daf69..dc4d8d65 100644 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ b/src/cloud_provider/aws/databases/postgresql.rs @@ -188,7 +188,7 @@ impl Service for PostgreSQLAws { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -196,7 +196,7 @@ impl Service for PostgreSQLAws { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? 
.matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs index 4bc1acd8..3f71f08b 100644 --- a/src/cloud_provider/aws/databases/redis.rs +++ b/src/cloud_provider/aws/databases/redis.rs @@ -131,7 +131,7 @@ impl Service for RedisAws { // https://aws.amazon.com/about-aws/whats-new/2019/08/elasticache_supports_50_chars_cluster_name let prefix = "redis"; let max_size = 47 - prefix.len(); // 50 (max Elasticache ) - 3 (k8s statefulset chars) - let mut new_name = self.name().replace("_", "").replace("-", ""); + let mut new_name = self.name().replace('_', "").replace('-', ""); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); @@ -192,7 +192,7 @@ impl Service for RedisAws { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -208,7 +208,7 @@ impl Service for RedisAws { "default.redis6.x" } else { return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details.clone(), + event_details, "Elasicache".to_string(), "database_elasticache_parameter_group_name".to_string(), format!("default.redis{}", version), diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index b2d4a3d7..8c236279 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -68,7 +68,7 @@ pub fn aws_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. 
Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -84,8 +84,8 @@ pub fn aws_helm_charts( qovery_terraform_config_file ); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; @@ -153,7 +153,7 @@ pub fn aws_helm_charts( ..Default::default() }, }; - let is_cni_old_installed_version = match aws_vpc_cni_chart.is_cni_old_installed_version(kubernetes_config, &envs) { + let is_cni_old_installed_version = match aws_vpc_cni_chart.is_cni_old_installed_version(kubernetes_config, envs) { Ok(x) => x, Err(e) => return Err(e), }; @@ -663,7 +663,7 @@ datasources: accessKey: '{}' secretKey: '{}' ", - prometheus_internal_url.clone(), + prometheus_internal_url, &loki.chart_info.name, loki_namespace.to_string(), &loki.chart_info.name, diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 11e667d7..502bf5e1 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -170,9 +170,9 @@ impl EKS { Ok(x) => aws_zones.push(x), Err(e) => { return Err(EngineError::new_unsupported_zone( - event_details.clone(), + event_details, region.to_string(), - zone.to_string(), + zone, CommandError::new_from_safe_message(e.to_string()), )) } @@ -181,11 +181,8 @@ impl EKS { for node_group in &nodes_groups { if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { - let err = EngineError::new_unsupported_instance_type( - event_details.clone(), - node_group.instance_type.as_str(), - e, - ); + let err = + EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); @@ -198,8 +195,8 @@ 
impl EKS { context.clone(), "s3-temp-id".to_string(), "default-s3".to_string(), - cloud_provider.access_key_id().clone(), - cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), region.clone(), true, context.resource_expiration_in_seconds(), @@ -239,7 +236,7 @@ impl EKS { .dns_provider .resolvers() .iter() - .map(|x| format!("{}", x.clone().to_string())) + .map(|x| format!("{}", x.clone())) .collect(); terraform_list_format(managed_dns_resolvers) @@ -458,7 +455,7 @@ impl EKS { // Vault context.insert("vault_auth_method", "none"); - if let Some(_) = env::var_os("VAULT_ADDR") { + if env::var_os("VAULT_ADDR").is_some() { // select the correct used method match env::var_os("VAULT_ROLE_ID") { Some(role_id) => { @@ -471,7 +468,7 @@ impl EKS { LogLevel::Error, EngineEvent::Error( EngineError::new_missing_required_env_variable( - event_details.clone(), + event_details, "VAULT_SECRET_ID".to_string(), ), None, @@ -480,7 +477,7 @@ impl EKS { } } None => { - if let Some(_) = env::var_os("VAULT_TOKEN") { + if env::var_os("VAULT_TOKEN").is_some() { context.insert("vault_auth_method", "token") } } @@ -525,7 +522,7 @@ impl EKS { // AWS - EKS context.insert("aws_availability_zones", &aws_zones); - context.insert("eks_cidr_subnet", &eks_cidr_subnet.clone()); + context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); @@ -679,9 +676,9 @@ impl EKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -690,13 +687,12 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", 
self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -730,7 +726,7 @@ impl EKS { ), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -752,7 +748,7 @@ impl EKS { // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -776,11 +772,11 @@ impl EKS { cluster_id: self.id.clone(), cluster_long_id: self.long_id, region: self.region(), - cluster_name: self.cluster_name().to_string(), + cluster_name: self.cluster_name(), cloud_provider: "aws".to_string(), test_cluster: self.context.is_test_cluster(), - aws_access_key_id: self.cloud_provider.access_key_id().to_string(), - aws_secret_access_key: self.cloud_provider.secret_access_key().to_string(), + aws_access_key_id: self.cloud_provider.access_key_id(), + aws_secret_access_key: self.cloud_provider.secret_access_key(), vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(), qovery_engine_location: self.get_engine_location(), ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory), @@ -807,13 +803,13 @@ impl EKS { format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, Some(&temp_dir), - &kubeconfig_path, 
+ kubeconfig_path, &credentials_environment_variables, ) .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -837,12 +833,12 @@ impl EKS { match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { Ok(ok_line) => self.logger().log( LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new(ok_line, None)), + EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), ), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), ), ), @@ -911,9 +907,9 @@ impl EKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -922,13 +918,12 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -946,7 +941,7 @@ impl EKS { tf_workers_resources_name } Err(e) => { - let error = 
EngineError::new_terraform_state_does_not_exist(event_details.clone(), e); + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); return Err(error); @@ -954,10 +949,7 @@ impl EKS { }; if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes( - event_details.clone(), - None, - )); + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); } let kubernetes_config_file_path = self.get_kubeconfig_file_path()?; @@ -983,7 +975,7 @@ impl EKS { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { let safe_message = "Error while looking at the API metric value"; - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.to_string()), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); } _ => {} } @@ -1010,7 +1002,7 @@ impl EKS { return Err(error) } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) } } } @@ -1042,12 +1034,12 @@ impl EKS { self.send_to_customer(&message, &listeners_helper); self.logger().log( LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), + EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), ); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )), } @@ -1093,9 +1085,9 @@ impl EKS { 
context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1104,13 +1096,12 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1260,7 +1251,7 @@ impl EKS { ) .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion @@ -1282,12 +1273,12 @@ impl EKS { let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1356,11 +1347,11 @@ impl EKS { ), ); - match helm.list_release(None, &vec![]) { + 
match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1423,18 +1414,18 @@ impl EKS { self.logger().log( LogLevel::Info, EngineEvent::Deleting( - event_details.clone(), + event_details, EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), ), ); Ok(()) } Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } @@ -1522,7 +1513,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1536,7 +1527,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1608,9 +1599,9 @@ impl Kubernetes for EKS { context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1622,9 +1613,9 @@ impl Kubernetes for EKS { common_charts_temp_dir.as_str(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1663,7 +1654,7 @@ impl 
Kubernetes for EKS { } Err(e) => { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -1684,7 +1675,7 @@ impl Kubernetes for EKS { self.logger().log( LogLevel::Info, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new_from_safe( "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), ), @@ -1737,9 +1728,9 @@ impl Kubernetes for EKS { context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1752,9 +1743,9 @@ impl Kubernetes for EKS { crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1799,14 +1790,14 @@ impl Kubernetes for EKS { let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } } // enable cluster autoscaler deployment - self.set_cluster_autoscaler_replicas(event_details.clone(), 1) + self.set_cluster_autoscaler_replicas(event_details, 1) } #[named] @@ -1817,7 +1808,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1831,7 +1822,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ 
-1845,7 +1836,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1859,7 +1850,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1873,7 +1864,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -1887,7 +1878,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -1901,7 +1892,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -1915,7 +1906,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -1971,7 +1962,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -1999,7 +1990,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) diff --git a/src/cloud_provider/aws/kubernetes/node.rs b/src/cloud_provider/aws/kubernetes/node.rs index 953cdbd8..2a88784f 100644 --- a/src/cloud_provider/aws/kubernetes/node.rs +++ b/src/cloud_provider/aws/kubernetes/node.rs @@ -72,7 +72,7 @@ impl FromStr for AwsInstancesType { "t3a.2xlarge" 
=> Ok(AwsInstancesType::T3a2xlarge), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index e0f7e2ea..d40d83ae 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -115,11 +115,9 @@ impl CloudProvider for AWS { match s { Ok(_x) => Ok(()), - Err(_) => { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - } + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, + )), } } diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index bc6aa954..907a37f5 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -173,7 +173,7 @@ impl AwsRegion { pub fn to_string(&self) -> String { let enum_name = format!("{}", self); - format!("{}", enum_name) + enum_name } pub fn to_aws_format(&self) -> String { @@ -377,7 +377,7 @@ impl AwsZones { pub fn from_string(zone: String) -> Result { // create tmp region from zone and get zone name (one letter) - let sanitized_zone_name = zone.to_lowercase().replace("-", "").replace("_", ""); + let sanitized_zone_name = zone.to_lowercase().replace('-', "").replace('_', ""); let mut sanitized_region = sanitized_zone_name.clone(); sanitized_region.pop(); @@ -392,7 +392,7 @@ impl AwsZones { // check if the zone is currently supported for zone in region.get_zones() { - if zone.to_string().replace("-", "") == sanitized_zone_name { + if zone.to_string().replace('-', "") == sanitized_zone_name { return Ok(zone); } } diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index eec33d08..656b4d2e 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -154,7 +154,7 @@ impl Service for RouterAws { let 
route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) @@ -167,8 +167,6 @@ impl Service for RouterAws { _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); // autoscaler @@ -197,7 +195,7 @@ impl Service for RouterAws { self.logger().log( LogLevel::Warning, EngineEvent::Warning( - event_details.clone(), + event_details, EventMessage::new_from_safe( "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), @@ -211,7 +209,7 @@ impl Service for RouterAws { self.logger().log( LogLevel::Warning, EngineEvent::Warning( - event_details.clone(), + event_details, EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), ), ); @@ -338,9 +336,9 @@ impl Create for RouterAws { crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -365,7 +363,7 @@ impl Create for RouterAws { self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index 9beec675..2e96c929 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -81,7 +81,7 @@ impl ApplicationDo { } fn is_stateful(&self) -> bool { - self.storage.len() > 0 + !self.storage.is_empty() } fn cloud_provider_name(&self) -> &str { @@ -213,7 +213,7 @@ impl Service for ApplicationDo { let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), + 
self.context.execution_id(), &self.id, self.total_cpus(), self.cpu_burst(), @@ -223,7 +223,7 @@ impl Service for ApplicationDo { Ok(l) => l, Err(e) => { return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), + event_details, self.total_cpus(), self.cpu_burst(), e, @@ -265,7 +265,7 @@ impl Service for ApplicationDo { }) .collect::>(); - let is_storage = storage.len() > 0; + let is_storage = !storage.is_empty(); context.insert("storage", &storage); context.insert("is_storage", &is_storage); @@ -300,7 +300,7 @@ impl Create for ApplicationDo { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index 177fb0eb..9a257482 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -67,7 +67,7 @@ impl MongoDo { fn matching_correct_version(&self, event_details: EventDetails) -> Result { check_service_version( - get_self_hosted_mongodb_version(self.version().clone()), + get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger(), @@ -175,7 +175,7 @@ impl Service for MongoDo { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -183,7 +183,7 @@ impl Service for MongoDo { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index ccd201bb..cd92f716 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -175,7 +175,7 @@ impl Service for MySQLDo { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -183,7 +183,7 @@ impl Service for MySQLDo { context.insert("namespace", environment.namespace()); let version = &self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index 9b7dbd1e..b993f2ef 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -175,7 +175,7 @@ impl Service for PostgresDo { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -183,7 +183,7 @@ impl Service for PostgresDo { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 8ed8d5b2..709c0e69 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -181,7 +181,7 @@ impl Service for RedisDo { ); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); diff --git a/src/cloud_provider/digitalocean/do_api_common.rs b/src/cloud_provider/digitalocean/do_api_common.rs index c98ab564..69471800 100644 --- a/src/cloud_provider/digitalocean/do_api_common.rs +++ b/src/cloud_provider/digitalocean/do_api_common.rs @@ -42,15 +42,15 @@ pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Res api_type ); return Err(CommandError::new( - format!("{}, response: {:?}", message_safe.to_string(), response), - Some(message_safe.to_string()), + format!("{}, response: {:?}", message_safe, response), + Some(message_safe), )); } _ => { let message_safe = format!("Unknown status code received from Digital Ocean Kubernetes API while retrieving {} information.", api_type); return Err(CommandError::new( - format!("{}, response: {:?}", message_safe.to_string(), response), - Some(message_safe.to_string()), + format!("{}, response: {:?}", message_safe, response), + Some(message_safe), )); } } diff --git a/src/cloud_provider/digitalocean/kubernetes/cidr.rs b/src/cloud_provider/digitalocean/kubernetes/cidr.rs index 6cad8fd7..8184403b 100644 --- a/src/cloud_provider/digitalocean/kubernetes/cidr.rs +++ b/src/cloud_provider/digitalocean/kubernetes/cidr.rs @@ -17,7 +17,7 @@ pub struct DoVpc { pub fn get_used_cidr_on_region(token: &str) { let mut output_from_cli = String::new(); - let mut cmd = QoveryCommand::new("doctl", &vec!["vpcs", "list", "--output", "json", "-t", token], &vec![]); + let mut cmd = 
QoveryCommand::new("doctl", &["vpcs", "list", "--output", "json", "-t", token], &[]); let _ = cmd.exec_with_output(&mut |r_out| output_from_cli.push_str(&r_out), &mut |r_err| { error!( "DOCTL CLI error from cmd inserted, please check vpcs list command{}", @@ -26,5 +26,5 @@ pub fn get_used_cidr_on_region(token: &str) { }); let buff = output_from_cli.borrow(); - let _array: Vec = serde_json::from_str(&buff).expect("JSON is not well-formatted"); + let _array: Vec = serde_json::from_str(buff).expect("JSON is not well-formatted"); } diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 107bab2b..ba570214 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -27,7 +27,7 @@ pub fn get_doks_info_from_name( Err(e) => { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; return Err(CommandError::new( - format!("{}, error: {}", safe_message.to_string(), e.to_string()), + format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()), )); } @@ -51,7 +51,7 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; return Err(CommandError::new( - format!("{}, error: {}", safe_message.to_string(), e.to_string()), + format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()), )); } @@ -91,7 +91,7 @@ pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Res Err(e) => Err(CommandError::new_from_safe_message(e.message())), }; - let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters.clone(); + let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters; let cluster_name = cluster_name.trim().to_lowercase(); match clusters_copy .into_iter() diff --git 
a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 0078579e..bf8b7f63 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -122,7 +122,7 @@ pub fn do_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -138,8 +138,8 @@ pub fn do_helm_charts( qovery_terraform_config_file ); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; @@ -1012,8 +1012,7 @@ datasources: ) ) .as_bytes(), - ) - .to_string(), + ), }, ChartSetValue { key: "do_container_registry_secret_identifier".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index f9416411..4296c6ba 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -134,7 +134,7 @@ impl DOKS { QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(region.to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), - Transmitter::Kubernetes(id.to_string(), name.to_string()), + Transmitter::Kubernetes(id, name), ), node_group.instance_type.as_str(), e, @@ -150,8 +150,8 @@ impl DOKS { context.clone(), "spaces-temp-id".to_string(), "my-spaces-object-storage".to_string(), - cloud_provider.access_key_id().clone(), - cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), region, 
BucketDeleteStrategy::HardDelete, ); @@ -217,17 +217,15 @@ impl DOKS { Ok(vpcs) => match vpcs { // new vpc: select a random non used subnet None => { - match get_do_random_available_subnet_from_api(&self.cloud_provider.token(), self.region) { + match get_do_random_available_subnet_from_api(self.cloud_provider.token(), self.region) { Ok(x) => x, - Err(e) => { - return Err(EngineError::new_cannot_get_any_available_vpc(event_details.clone(), e)) - } + Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details, e)), } } // existing vpc: assign current subnet in this case Some(vpc) => vpc.ip_range, }, - Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details, e)), } } VpcInitKind::Manual => self.options.vpc_cidr_block.clone(), @@ -383,7 +381,7 @@ impl DOKS { LogLevel::Error, EngineEvent::Error( EngineError::new_missing_required_env_variable( - event_details.clone(), + event_details, "VAULT_SECRET_ID".to_string(), ), None, @@ -413,7 +411,7 @@ impl DOKS { match get_do_latest_doks_slug_from_api(self.cloud_provider.token(), self.version()) { Ok(version) => match version { None => Err(EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, self.kind().to_string(), VersionsNumber::from_str(&self.version) .expect("cannot parse version") @@ -422,7 +420,7 @@ impl DOKS { Some(v) => Ok(v), }, Err(e) => Err(EngineError::new_cannot_get_supported_versions_error( - event_details.clone(), + event_details, self.kind().to_string(), e, )), @@ -463,7 +461,7 @@ impl DOKS { let api_url = format!("{}/clusters", DoApiType::Doks.api_url()); let json_content = do_get_from_api(self.cloud_provider.token(), DoApiType::Doks, api_url)?; // TODO(benjaminch): `qovery-` to be added into Rust name directly everywhere - match get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id().to_string())) { + match 
get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id())) { Ok(cluster_result) => match cluster_result { None => Err(CommandError::new_from_safe_message( "Cluster doesn't exist on DO side.".to_string(), @@ -548,9 +546,9 @@ impl DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -559,13 +557,12 @@ impl DOKS { // this is due to the required dependencies of lib/digitalocean/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -604,7 +601,7 @@ impl DOKS { ), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -625,11 +622,8 @@ impl DOKS { // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), - self.logs_bucket_name(), - e, - ); + let error = + EngineError::new_object_storage_cannot_create_bucket_error(event_details, self.logs_bucket_name(), e); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); return Err(error); @@ -638,7 +632,7 @@ impl DOKS { // terraform deployment dedicated to cloud resources 
if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -661,7 +655,7 @@ impl DOKS { ) } Err(e) => { - return Err(EngineError::new_k8s_node_not_ready(event_details.clone(), e)); + return Err(EngineError::new_k8s_node_not_ready(event_details, e)); } }; @@ -675,7 +669,7 @@ impl DOKS { let doks_id = match self.get_doks_info_from_name_api() { Ok(cluster) => cluster.id, - Err(e) => return Err(EngineError::new_cannot_get_cluster_error(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_cluster_error(event_details, e)), }; let charts_prerequisites = ChartsConfigPrerequisites { @@ -685,13 +679,13 @@ impl DOKS { cluster_id: self.id.clone(), cluster_long_id: self.long_id, do_cluster_id: doks_id, - region: self.region().to_string(), - cluster_name: self.cluster_name().to_string(), + region: self.region(), + cluster_name: self.cluster_name(), cloud_provider: "digitalocean".to_string(), test_cluster: self.context.is_test_cluster(), do_token: self.cloud_provider.token().to_string(), - do_space_access_id: self.cloud_provider.access_key_id().to_string(), - do_space_secret_key: self.cloud_provider.secret_access_key().to_string(), + do_space_access_id: self.cloud_provider.access_key_id(), + do_space_secret_key: self.cloud_provider.secret_access_key(), do_space_bucket_kubeconfig: self.kubeconfig_bucket_name(), do_space_kubeconfig_filename: self.kubeconfig_file_name(), qovery_engine_location: self.options.qovery_engine_location.clone(), @@ -725,7 +719,7 @@ impl DOKS { .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -760,7 +754,7 @@ impl DOKS { return 
Err(EngineError::new_k8s_loadbalancer_configuration_issue( event_details.clone(), CommandError::new( - format!("{}, error: {}.", safe_message.to_string(), e.message(),), + format!("{}, error: {}.", safe_message, e.message(),), Some(safe_message.to_string()), ), )); @@ -796,8 +790,8 @@ impl DOKS { .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; // This will ony print the diff on stdout - let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &vec![]); - helm.upgrade(&load_balancer_dns_hostname, &vec![]) + let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &[]); + helm.upgrade(&load_balancer_dns_hostname, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } @@ -817,12 +811,12 @@ impl DOKS { match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { Ok(ok_line) => self.logger().log( LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new(ok_line, None)), + EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), ), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), ), ), @@ -905,9 +899,9 @@ impl DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -917,13 +911,12 @@ impl DOKS { let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return 
Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1059,7 +1052,7 @@ impl DOKS { ) .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion @@ -1081,12 +1074,12 @@ impl DOKS { let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1155,11 +1148,11 @@ impl DOKS { ), ); - match helm.list_release(None, &vec![]) { + match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1222,18 +1215,18 @@ impl DOKS { self.logger().log( LogLevel::Info, EngineEvent::Deleting( - event_details.clone(), + event_details, EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), ), ); Ok(()) } Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } @@ -1321,7 +1314,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1335,7 +1328,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1400,7 +1393,7 @@ impl Kubernetes for DOKS { Ok(version) => match version { None => { return Err(EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, self.kind().to_string(), VersionsNumber::from_str(&self.version) .expect("cannot parse version") @@ -1411,14 +1404,14 @@ impl Kubernetes for DOKS { }, Err(e) => { return Err(EngineError::new_cannot_get_supported_versions_error( - event_details.clone(), + event_details, self.kind().to_string(), e, )) } }; - context.insert("doks_version", format!("{}", &upgrade_doks_version).as_str()); + context.insert("doks_version", (&upgrade_doks_version).to_string().as_str()); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1426,22 +1419,21 @@ impl Kubernetes for DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - 
crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1468,7 +1460,7 @@ impl Kubernetes for DOKS { self.logger().log( LogLevel::Info, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new_from_safe( "Kubernetes nodes have been successfully upgraded.".to_string(), ), @@ -1477,7 +1469,7 @@ impl Kubernetes for DOKS { } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( - event_details.clone(), + event_details, kubernetes_upgrade_status.requested_version.to_string(), e, )); @@ -1485,7 +1477,7 @@ impl Kubernetes for DOKS { }, Err(e) => { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -1502,7 +1494,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1516,7 +1508,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ -1530,7 +1522,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1544,7 +1536,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), 
); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1558,7 +1550,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -1572,7 +1564,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -1586,7 +1578,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -1600,7 +1592,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -1656,7 +1648,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -1684,7 +1676,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -1713,13 +1705,10 @@ impl Kubernetes for DOKS { self.logger().log( LogLevel::Debug, EngineEvent::Debug( - self.get_event_details(stage.clone()), + self.get_event_details(stage), EventMessage::new( err.to_string(), - Some( - format!("Error, couldn't open {} file", &local_kubeconfig_generated,) - .to_string(), - ), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), ), ), ); @@ -1750,7 +1739,7 @@ impl Kubernetes for DOKS { Ok(kubeconfig) => match kubeconfig { None => { return Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), + event_details, CommandError::new_from_safe_message("Kubeconfig is 
empty".to_string()), )) } @@ -1758,7 +1747,7 @@ impl Kubernetes for DOKS { }, Err(e) => { return Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), + event_details, CommandError::new(e.message(), Some(e.message())), )) } @@ -1814,7 +1803,7 @@ impl Kubernetes for DOKS { match result { Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), + event_details, CommandError::new(e.message(), Some(e.message())), )), Ok((file_path, file)) => Ok((file_path, file)), diff --git a/src/cloud_provider/digitalocean/kubernetes/node.rs b/src/cloud_provider/digitalocean/kubernetes/node.rs index 3a5bb7a5..549fa3eb 100644 --- a/src/cloud_provider/digitalocean/kubernetes/node.rs +++ b/src/cloud_provider/digitalocean/kubernetes/node.rs @@ -113,7 +113,7 @@ impl FromStr for DoInstancesType { "s-32vcpu-192gb" => Ok(DoInstancesType::S32vcpu192gb), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index 36d2689d..1fa1ae7f 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -105,11 +105,9 @@ impl CloudProvider for DO { let client = DigitalOcean::new(&self.token); match client { Ok(_x) => Ok(()), - Err(_) => { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - } + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( + event_details, + )), } } diff --git a/src/cloud_provider/digitalocean/network/load_balancer.rs b/src/cloud_provider/digitalocean/network/load_balancer.rs index ee9e2cd1..ace9ca02 100644 --- a/src/cloud_provider/digitalocean/network/load_balancer.rs +++ b/src/cloud_provider/digitalocean/network/load_balancer.rs @@ -143,7 +143,7 @@ mod tests_do_api_output { } } 
"#; - let ip_returned_from_api = get_ip_from_do_load_balancer_api_output(&json_content); + let ip_returned_from_api = get_ip_from_do_load_balancer_api_output(json_content); assert_eq!(ip_returned_from_api.unwrap().to_string(), "104.131.186.241"); } diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 86074690..b890eb28 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -129,7 +129,7 @@ fn do_get_vpcs_from_api_output(json_content: &str) -> Result, CommandEr Err(e) => { let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API"; Err(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e), + format!("{}, error: {}", message_safe, e), Some(message_safe.to_string()), )) } @@ -307,7 +307,7 @@ mod tests_do_vpcs { let json_content = do_get_vpc_json(); let existing_vpcs = do_get_vpcs_from_api_output(&json_content).unwrap(); - assert!(get_random_available_subnet(existing_vpcs.clone(), DoRegion::Frankfurt).is_ok()); + assert!(get_random_available_subnet(existing_vpcs, DoRegion::Frankfurt).is_ok()); } #[test] diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index 87635ad6..91db5f5b 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -162,24 +162,19 @@ impl Service for RouterDo { let route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) { - Some(application) => match application.private_port() { - Some(private_port) => Some(RouteDataTemplate { - path: r.path.clone(), - application_name: application.sanitized_name().to_string(), - application_port: private_port, - }), - _ => None, - }, + Some(application) => application.private_port().map(|private_port| RouteDataTemplate { + path: 
r.path.clone(), + application_name: application.sanitized_name(), + application_port: private_port, + }), _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); // autoscaler @@ -210,7 +205,7 @@ impl Service for RouterDo { self.logger().log( LogLevel::Warning, EngineEvent::Warning( - event_details.clone(), + event_details, EventMessage::new_from_safe( "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), @@ -224,7 +219,7 @@ impl Service for RouterDo { self.logger().log( LogLevel::Warning, EngineEvent::Warning( - event_details.clone(), + event_details, EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), ), ); @@ -355,9 +350,9 @@ impl Create for RouterDo { crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -381,7 +376,7 @@ impl Create for RouterDo { self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e)) } diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index ad5f9c57..2693886a 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -201,16 +201,16 @@ pub trait HelmChart: Send { fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result, CommandError> { info!("prepare and deploy chart {}", &self.get_chart_info().name); let payload = self.check_prerequisites()?; - let payload = self.pre_exec(&kubernetes_config, &envs, payload)?; - let payload = match self.exec(&kubernetes_config, &envs, payload.clone()) { + let payload = self.pre_exec(kubernetes_config, envs, payload)?; + let payload = match self.exec(kubernetes_config, envs, payload.clone()) { Ok(payload) => payload, Err(e) 
=> { error!("Error while deploying chart: {}", e.message()); - self.on_deploy_failure(&kubernetes_config, &envs, payload)?; + self.on_deploy_failure(kubernetes_config, envs, payload)?; return Err(e); } }; - let payload = self.post_exec(&kubernetes_config, &envs, payload)?; + let payload = self.post_exec(kubernetes_config, envs, payload)?; Ok(payload) } @@ -226,18 +226,18 @@ pub trait HelmChart: Send { match chart_info.action { HelmAction::Deploy => { - if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) { + if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { warn!( "error while trying to destroy chart if breaking change is detected: {:?}", e.to_string() ); } - helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?; + helm.upgrade(chart_info, &[]).map_err(to_command_error)?; } HelmAction::Destroy => { let chart_info = self.get_chart_info(); - helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?; + helm.uninstall(chart_info, &[]).map_err(to_command_error)?; } HelmAction::Skip => {} } @@ -306,7 +306,7 @@ fn deploy_parallel_charts( Err(e) => { let safe_message = "Thread panicked during parallel charts deployments."; let error = Err(CommandError::new( - format!("{}, error: {:?}", safe_message.to_string(), e), + format!("{}, error: {:?}", safe_message, e), Some(safe_message.to_string()), )); errors.push(error); @@ -338,7 +338,7 @@ pub fn deploy_charts_levels( let chart_info = chart.get_chart_info(); // don't do diff on destroy or skip if chart_info.action == HelmAction::Deploy { - let _ = helm.upgrade_diff(chart_info, &vec![]); + let _ = helm.upgrade_diff(chart_info, &[]); } } @@ -347,7 +347,7 @@ pub fn deploy_charts_levels( continue; } - if let Err(e) = deploy_parallel_charts(&kubernetes_config, &envs, level) { + if let Err(e) = deploy_parallel_charts(kubernetes_config, envs, level) { return Err(e); } } @@ -442,7 +442,7 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "annotate", "--overwrite", 
- &kind, + kind, &self.chart_info.name, format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(), ], @@ -456,7 +456,7 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "annotate", "--overwrite", - &kind, + kind, &self.chart_info.name, "meta.helm.sh/release-namespace=kube-system", ], @@ -470,7 +470,7 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "label", "--overwrite", - &kind, + kind, &self.chart_info.name, "app.kubernetes.io/managed-by=Helm", ], @@ -490,7 +490,7 @@ impl HelmChart for CoreDNSConfigChart { fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result, CommandError> { info!("prepare and deploy chart {}", &self.get_chart_info().name); self.check_prerequisites()?; - let payload = match self.pre_exec(&kubernetes_config, &envs, None) { + let payload = match self.pre_exec(kubernetes_config, envs, None) { Ok(p) => match p { None => { return Err(CommandError::new_from_safe_message( @@ -501,12 +501,12 @@ impl HelmChart for CoreDNSConfigChart { }, Err(e) => return Err(e), }; - if let Err(e) = self.exec(&kubernetes_config, &envs, None) { + if let Err(e) = self.exec(kubernetes_config, envs, None) { error!("Error while deploying chart: {:?}", e.message()); - self.on_deploy_failure(&kubernetes_config, &envs, None)?; + self.on_deploy_failure(kubernetes_config, envs, None)?; return Err(e); }; - self.post_exec(&kubernetes_config, &envs, Some(payload))?; + self.post_exec(kubernetes_config, envs, Some(payload))?; Ok(None) } @@ -594,19 +594,19 @@ impl HelmChart for PrometheusOperatorConfigChart { match chart_info.action { HelmAction::Deploy => { - if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) { + if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { warn!( "error while trying to destroy chart if breaking change is detected: {}", e.to_string() ); } - helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?; + helm.upgrade(chart_info, 
&[]).map_err(to_command_error)?; } HelmAction::Destroy => { let chart_info = self.get_chart_info(); - if helm.check_release_exist(&chart_info, &vec![]).is_ok() { - helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?; + if helm.check_release_exist(chart_info, &[]).is_ok() { + helm.uninstall(chart_info, &[]).map_err(to_command_error)?; let prometheus_crds = [ "prometheuses.monitoring.coreos.com", diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index e5eb10b9..3487b50c 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -72,7 +72,7 @@ pub trait Kubernetes: Listen { QoveryIdentifier::from(context.organization_id().to_string()), QoveryIdentifier::from(context.cluster_id().to_string()), QoveryIdentifier::from(context.execution_id().to_string()), - Some(self.region().to_string()), + Some(self.region()), stage, Transmitter::Kubernetes(self.id().to_string(), self.name().to_string()), ) @@ -103,10 +103,7 @@ pub trait Kubernetes: Listen { self.get_event_details(stage.clone()), EventMessage::new( err.to_string(), - Some( - format!("Error, couldn't open {} file", &local_kubeconfig_generated,) - .to_string(), - ), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), ), ), ); @@ -136,7 +133,7 @@ pub trait Kubernetes: Listen { Ok((path, file)) => (path, file), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), + self.get_event_details(stage), err.into(), ); self.logger() @@ -151,10 +148,8 @@ pub trait Kubernetes: Listen { Ok(metadata) => metadata, Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), - CommandError::new_from_safe_message( - format!("Error getting file metadata, error: {}", err.to_string(),).to_string(), - ), + self.get_event_details(stage), + CommandError::new_from_safe_message(format!("Error getting file metadata, error: 
{}", err,)), ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -166,11 +161,8 @@ pub trait Kubernetes: Listen { permissions.set_mode(0o400); if let Err(err) = std::fs::set_permissions(string_path.as_str(), permissions) { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), - CommandError::new_from_safe_message(format!( - "Error setting file permissions, error: {}", - err.to_string(), - )), + self.get_event_details(stage), + CommandError::new_from_safe_message(format!("Error setting file permissions, error: {}", err,)), ); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); @@ -197,9 +189,10 @@ pub trait Kubernetes: Listen { Err(err) => { let error = EngineError::new_cannot_get_cluster_nodes( self.get_event_details(stage), - CommandError::new_from_safe_message( - format!("Error while trying to get cluster nodes, error: {}", err.message()).to_string(), - ), + CommandError::new_from_safe_message(format!( + "Error while trying to get cluster nodes, error: {}", + err.message() + )), ); self.logger() @@ -251,9 +244,9 @@ pub trait Kubernetes: Listen { Err(e) => Err(e), Ok(..) 
=> match is_kubernetes_upgrade_required( kubeconfig, - &self.version(), + self.version(), self.cloud_provider().credentials_environment_variables(), - event_details.clone(), + event_details, self.logger(), ) { Ok(x) => self.upgrade_with_status(x), @@ -354,7 +347,7 @@ pub trait Kubernetes: Listen { envs.clone(), ) { return Err(EngineError::new_k8s_cannot_delete_pod( - event_details.clone(), + event_details, pod.metadata.name.to_string(), e, )); @@ -362,10 +355,7 @@ pub trait Kubernetes: Listen { } } Err(e) => { - return Err(EngineError::new_k8s_cannot_get_crash_looping_pods( - event_details.clone(), - e, - )); + return Err(EngineError::new_k8s_cannot_get_crash_looping_pods(event_details, e)); } }, }; @@ -770,7 +760,7 @@ pub fn delete_environment( // do not catch potential error - to confirm let _ = kubectl::kubectl_exec_delete_namespace( kubernetes.get_kubeconfig_file_path()?, - &environment.namespace(), + environment.namespace(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -837,7 +827,7 @@ where Ok(_) => {} Err(Operation { error, .. 
}) => { return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details.clone(), + event_details, "Cert-Manager".to_string(), object.to_string(), error, @@ -845,7 +835,7 @@ where } Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details.clone(), + event_details, "Cert-Manager".to_string(), object.to_string(), CommandError::new_from_safe_message(msg), @@ -870,19 +860,14 @@ where // check master versions let v = match kubectl_exec_version(&kubernetes_config, envs.clone()) { Ok(v) => v, - Err(e) => { - return Err(EngineError::new_cannot_execute_k8s_exec_version( - event_details.clone(), - e, - )) - } + Err(e) => return Err(EngineError::new_cannot_execute_k8s_exec_version(event_details, e)), }; let raw_version = format!("{}.{}", v.server_version.major, v.server_version.minor); let masters_version = match VersionsNumber::from_str(raw_version.as_str()) { Ok(vn) => vn, Err(_) => { return Err(EngineError::new_cannot_determine_k8s_master_version( - event_details.clone(), + event_details, raw_version.to_string(), )) } @@ -892,7 +877,7 @@ where let mut workers_version: Vec = vec![]; let nodes = match kubectl_exec_get_node(kubernetes_config, envs) { Ok(n) => n, - Err(e) => return Err(EngineError::new_cannot_get_cluster_nodes(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_cluster_nodes(event_details, e)), }; for node in nodes.items { @@ -901,7 +886,7 @@ where Ok(vn) => workers_version.push(vn), Err(_) => { return Err(EngineError::new_cannot_determine_k8s_kubelet_worker_version( - event_details.clone(), + event_details, node.status.node_info.kubelet_version.to_string(), )) } @@ -912,7 +897,7 @@ where Ok(vn) => workers_version.push(vn), Err(_) => { return Err(EngineError::new_cannot_determine_k8s_kube_proxy_version( - event_details.clone(), + event_details, node.status.node_info.kube_proxy_version.to_string(), )) } @@ -923,7 +908,7 @@ where requested_version, masters_version, 
workers_version, - event_details.clone(), + event_details, logger, ) } @@ -943,7 +928,7 @@ where for pdb in pdbs.items.unwrap() { if pdb.status.current_healthy < pdb.status.desired_healthy { return Err(EngineError::new_k8s_pod_disruption_budget_invalid_state( - event_details.clone(), + event_details, pdb.metadata.name, )); } @@ -951,12 +936,10 @@ where Ok(()) } }, - Err(err) => { - return Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget( - event_details.clone(), - err, - )); - } + Err(err) => Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget( + event_details, + err, + )), } } @@ -979,7 +962,7 @@ where )); } } - return OperationResult::Ok(()); + OperationResult::Ok(()) } } }); @@ -1014,16 +997,16 @@ where )); } } - return OperationResult::Ok(()); + OperationResult::Ok(()) } } }); - return match result { + match result { Ok(_) => Ok(()), Err(Operation { error, .. }) => Err(error), Err(retry::Error::Internal(e)) => Err(CommandError::new_from_safe_message(e)), - }; + } } #[derive(Debug, PartialEq)] @@ -1067,7 +1050,7 @@ fn check_kubernetes_upgrade_status( Ok(v) => v, Err(e) => { return Err(EngineError::new_cannot_determine_k8s_requested_upgrade_version( - event_details.clone(), + event_details, requested_version.to_string(), Some(e), )); @@ -1093,7 +1076,7 @@ fn check_kubernetes_upgrade_status( Err(e) => { return Err( EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details.clone(), + event_details, deployed_masters_version, wished_version, e, @@ -1107,7 +1090,7 @@ fn check_kubernetes_upgrade_status( logger.log( LogLevel::Warning, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new_from_safe( "No worker nodes found, can't check if upgrade is required for workers".to_string(), ), @@ -1146,7 +1129,7 @@ fn check_kubernetes_upgrade_status( Err(e) => { return Err( EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details.clone(), + 
event_details, node, wished_version, e, @@ -1159,7 +1142,7 @@ fn check_kubernetes_upgrade_status( logger.log( LogLevel::Info, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new_from_safe(match &required_upgrade_on { None => "All workers are up to date, no upgrade required".to_string(), Some(node_type) => match node_type { @@ -1324,9 +1307,7 @@ where { let listeners = std::clone::Clone::clone(kubernetes.listeners()); let logger = kubernetes.logger().clone_dyn(); - let event_details = kubernetes - .get_event_details(Stage::Infrastructure(InfrastructureStep::Create)) - .clone(); + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); let progress_info = ProgressInfo::new( ProgressScope::Infrastructure { @@ -1573,7 +1554,7 @@ mod tests { "1.17", version_1_17.clone(), vec![version_1_17.clone(), version_1_16.clone()], - event_details.clone(), + event_details, &logger, ) .unwrap(); @@ -1590,7 +1571,7 @@ mod tests { "Provider version: {} | Wished version: {} | Is upgrade required: {:?}", provider_version.clone(), provider.clone(), - compare_kubernetes_cluster_versions_for_upgrade(&provider_version, &provider) + compare_kubernetes_cluster_versions_for_upgrade(provider_version, provider) .unwrap() .message ) @@ -2070,8 +2051,8 @@ mod tests { let milli_cpu = "250m".to_string(); let int_cpu = "2".to_string(); - assert_eq!(convert_k8s_cpu_value_to_f32(milli_cpu).unwrap(), 0.25 as f32); - assert_eq!(convert_k8s_cpu_value_to_f32(int_cpu).unwrap(), 2 as f32); + assert_eq!(convert_k8s_cpu_value_to_f32(milli_cpu).unwrap(), 0.25_f32); + assert_eq!(convert_k8s_cpu_value_to_f32(int_cpu).unwrap(), 2_f32); } #[test] @@ -2122,7 +2103,7 @@ mod tests { context_id, total_cpu, cpu_burst, - event_details.clone(), + event_details, &logger ) .unwrap(), diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index 1c9c24f9..d9a27611 100644 --- a/src/cloud_provider/qovery.rs +++ 
b/src/cloud_provider/qovery.rs @@ -63,13 +63,13 @@ pub fn get_qovery_app_version( Ok(x) => match x.json::() { Ok(qa) => Ok(qa), Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )), }, Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )), } } diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index c2549ec0..cd13f3cc 100644 --- a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -228,7 +228,7 @@ impl Service for ApplicationScw { let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), + self.context.execution_id(), &self.id, self.total_cpus(), self.cpu_burst(), @@ -238,7 +238,7 @@ impl Service for ApplicationScw { Ok(l) => l, Err(e) => { return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), + event_details, self.total_cpus(), self.cpu_burst(), e, @@ -313,7 +313,7 @@ impl Create for ApplicationScw { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); @@ -334,7 +334,7 @@ impl Create for ApplicationScw { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index 933731cb..fbdb3670 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -177,7 +177,7 @@ impl Service for MongoDbScw { context.insert("kubeconfig_path", &kube_config_file_path); 
kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -185,7 +185,7 @@ impl Service for MongoDbScw { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); context.insert("version", &version); diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index f543c5a3..1400b456 100644 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -203,7 +203,7 @@ impl Service for MySQLScw { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -211,7 +211,7 @@ impl Service for MySQLScw { context.insert("namespace", environment.namespace()); let version = &self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? 
.matched_version(); context.insert("version_major", &version.to_major_version_string()); context.insert("version", &version.to_string()); // Scaleway needs to have major version only diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index 376611d1..bd6fb29c 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -212,7 +212,7 @@ impl Service for PostgresScw { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -220,7 +220,7 @@ impl Service for PostgresScw { context.insert("namespace", environment.namespace()); let version = &self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? .matched_version(); context.insert("version_major", &version.to_major_version_string()); context.insert("version", &version.to_string()); // Scaleway needs to have major version only diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 3abc9ad4..4cdd09dd 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -176,13 +176,13 @@ impl Service for RedisScw { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 2900525e..e010119d 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -117,7 +117,7 @@ pub fn scw_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -133,8 +133,8 @@ pub fn scw_helm_charts( qovery_terraform_config_file ); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; @@ -454,7 +454,7 @@ datasources: type: loki url: \"http://{}.{}.svc:3100\" ", - prometheus_internal_url.clone(), + prometheus_internal_url, &loki.chart_info.name, loki_namespace.to_string(), &loki.chart_info.name, diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index b9eb650b..3b698794 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -165,7 +165,7 @@ impl Kapsule { QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), - Some(zone.region_str().to_string()), + Some(zone.region_str()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id, name), ), @@ -183,8 +183,8 @@ impl Kapsule { context.clone(), "s3-temp-id".to_string(), "default-s3".to_string(), - cloud_provider.access_key_id().clone(), - 
cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), zone, BucketDeleteStrategy::Empty, false, @@ -240,7 +240,7 @@ impl Kapsule { Err(e) => { let msg = format!("wasn't able to retrieve SCW cluster information from the API. {:?}", e); return Err(EngineError::new_cannot_get_cluster_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } @@ -248,9 +248,9 @@ impl Kapsule { // if no cluster exists let cluster_info_content = cluster_info.clusters.unwrap(); - if &cluster_info_content.len() == &(0 as usize) { + if &cluster_info_content.len() == &0_usize { return Ok(None); - } else if &cluster_info_content.len() != &(1 as usize) { + } else if &cluster_info_content.len() != &1_usize { let msg = format!( "too many clusters found with this name, where 1 was expected. {:?}", &cluster_info_content.len() @@ -268,7 +268,7 @@ impl Kapsule { &self, cluster_info: ScalewayK8sV1Cluster, ) -> Result, ScwNodeGroupErrors> { - let error_cluster_id = format!("expected cluster id for this Scaleway cluster"); + let error_cluster_id = "expected cluster id for this Scaleway cluster".to_string(); let cluster_id = match cluster_info.id { None => { return Err(ScwNodeGroupErrors::NodeGroupValidationError( @@ -291,7 +291,7 @@ impl Kapsule { Ok(x) => x, Err(e) => { let msg = format!("error while trying to get SCW pool info from cluster {}", &cluster_id); - let msg_with_error = format!("{}. {:?}", msg.clone(), e); + let msg_with_error = format!("{}. 
{:?}", msg, e); return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( msg_with_error, Some(msg), @@ -558,7 +558,7 @@ impl Kapsule { LogLevel::Error, EngineEvent::Error( EngineError::new_missing_required_env_variable( - event_details.clone(), + event_details, "VAULT_SECRET_ID".to_string(), ), None, @@ -671,9 +671,9 @@ impl Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -682,13 +682,12 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -723,7 +722,7 @@ impl Kapsule { ), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -756,7 +755,7 @@ impl Kapsule { .create_bucket(self.kubeconfig_bucket_name().as_str()) { let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), + event_details, self.kubeconfig_bucket_name(), e, ); @@ -767,11 +766,8 @@ impl Kapsule { // Logs bucket if let Err(e) = self.object_storage.create_bucket(self.logs_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - 
event_details.clone(), - self.logs_bucket_name(), - e, - ); + let error = + EngineError::new_object_storage_cannot_create_bucket_error(event_details, self.logs_bucket_name(), e); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); return Err(error); @@ -780,7 +776,7 @@ impl Kapsule { // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -795,7 +791,7 @@ impl Kapsule { kubeconfig_path.to_str().expect("No path for Kubeconfig"), ) { let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( - event_details.clone(), + event_details, self.logs_bucket_name(), kubeconfig_name.to_string(), e, @@ -809,7 +805,7 @@ impl Kapsule { if cluster_info.is_none() { let msg = "no cluster found from the Scaleway API".to_string(); return Err(EngineError::new_no_cluster_found_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } @@ -822,7 +818,7 @@ impl Kapsule { match e { ScwNodeGroupErrors::CloudProviderApiError(c) => { return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(c), )) } @@ -838,7 +834,7 @@ impl Kapsule { ScwNodeGroupErrors::MultipleClusterFound => { let msg = "multiple clusters found, can't match the correct node groups".to_string(); return Err(EngineError::new_multiple_cluster_found_expected_one_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } @@ -852,15 +848,15 @@ impl Kapsule { ), ), ScwNodeGroupErrors::MissingNodePoolInfo => { - let msg = format!("Error with Scaleway API while trying to retrieve node pool info"); + let msg = "Error with Scaleway API while trying to retrieve node pool info".to_string(); return 
Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(CommandError::new_from_safe_message(msg)), )); } ScwNodeGroupErrors::NodeGroupValidationError(c) => { return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(c), )); } @@ -974,7 +970,7 @@ impl Kapsule { Err(Operation { error, .. }) => return Err(error), Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_k8s_node_not_ready( - event_details.clone(), + event_details, CommandError::new(msg, Some("Waiting for too long worker nodes to be ready".to_string())), )) } @@ -1006,7 +1002,7 @@ impl Kapsule { ) } Err(e) => { - return Err(EngineError::new_k8s_node_not_ready(event_details.clone(), e)); + return Err(EngineError::new_k8s_node_not_ready(event_details, e)); } }; @@ -1027,8 +1023,8 @@ impl Kapsule { self.cluster_name(), "scw".to_string(), self.context.is_test_cluster(), - self.cloud_provider.access_key_id().to_string(), - self.cloud_provider.secret_access_key().to_string(), + self.cloud_provider.access_key_id(), + self.cloud_provider.secret_access_key(), self.options.scaleway_project_id.to_string(), self.options.qovery_engine_location.clone(), self.context.is_feature_enabled(&Features::LogsHistory), @@ -1056,13 +1052,13 @@ impl Kapsule { format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, Some(&temp_dir), - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, ) .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -1086,12 +1082,12 @@ impl Kapsule { match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { Ok(ok_line) => self.logger().log( LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), 
EventMessage::new_from_safe(ok_line)), + EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line)), ), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), ), ), @@ -1160,9 +1156,9 @@ impl Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1171,13 +1167,12 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1195,7 +1190,7 @@ impl Kapsule { tf_workers_resources_name } Err(e) => { - let error = EngineError::new_terraform_state_does_not_exist(event_details.clone(), e); + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); self.logger() .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); return Err(error); @@ -1203,10 +1198,7 @@ impl Kapsule { }; if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes( - event_details.clone(), - None, - )); + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); } let 
kubernetes_config_file_path = self.get_kubeconfig_file_path()?; @@ -1232,7 +1224,7 @@ impl Kapsule { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { let safe_message = "Error while looking at the API metric value"; - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.to_string()), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); } _ => {} } @@ -1247,7 +1239,7 @@ impl Kapsule { Err(e) => { let safe_message = format!("Error while looking at the API metric value {}", metric_name); OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.message()), Some(safe_message.to_string())))) + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.message()), Some(safe_message)))) } }; }); @@ -1260,7 +1252,7 @@ impl Kapsule { return Err(error) } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) } } } @@ -1292,12 +1284,12 @@ impl Kapsule { self.send_to_customer(&message, &listeners_helper); self.logger().log( LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), + EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), ); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )), } @@ -1343,9 
+1335,9 @@ impl Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1354,13 +1346,12 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1496,7 +1487,7 @@ impl Kapsule { ) .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion @@ -1518,12 +1509,12 @@ impl Kapsule { let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1592,11 +1583,11 @@ impl Kapsule { ), ); - 
match helm.list_release(None, &vec![]) { + match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { + match helm.uninstall(&chart_info, &[]) { Ok(_) => self.logger().log( LogLevel::Info, EngineEvent::Deleting( @@ -1659,18 +1650,18 @@ impl Kapsule { self.logger().log( LogLevel::Info, EngineEvent::Deleting( - event_details.clone(), + event_details, EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), ), ); Ok(()) } Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } @@ -1758,7 +1749,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1772,7 +1763,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1840,9 +1831,9 @@ impl Kubernetes for Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1853,9 +1844,9 @@ impl Kubernetes for Kapsule { crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - 
common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1882,7 +1873,7 @@ impl Kubernetes for Kapsule { self.logger().log( LogLevel::Info, EngineEvent::Deploying( - event_details.clone(), + event_details, EventMessage::new_from_safe( "Kubernetes nodes have been successfully upgraded.".to_string(), ), @@ -1891,7 +1882,7 @@ impl Kubernetes for Kapsule { } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( - event_details.clone(), + event_details, kubernetes_upgrade_status.requested_version.to_string(), e, )); @@ -1899,7 +1890,7 @@ impl Kubernetes for Kapsule { }, Err(e) => { return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), + event_details, e, )); } @@ -1916,7 +1907,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1930,7 +1921,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ -1944,7 +1935,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1958,7 +1949,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1972,7 +1963,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -1986,7 +1977,7 @@ impl 
Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -2000,7 +1991,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -2014,7 +2005,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -2070,7 +2061,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -2098,7 +2089,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) diff --git a/src/cloud_provider/scaleway/kubernetes/node.rs b/src/cloud_provider/scaleway/kubernetes/node.rs index e1d85bd5..8cd5419c 100644 --- a/src/cloud_provider/scaleway/kubernetes/node.rs +++ b/src/cloud_provider/scaleway/kubernetes/node.rs @@ -82,7 +82,7 @@ impl FromStr for ScwInstancesType { "render-s" => Ok(ScwInstancesType::RenderS), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 4844331d..3c62e19c 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -154,7 +154,7 @@ impl Service for RouterScw { let route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) @@ -167,8 +167,6 
@@ impl Service for RouterScw { _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str()); @@ -292,9 +290,9 @@ impl Create for RouterScw { crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -319,7 +317,7 @@ impl Create for RouterScw { self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e)) } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 1948bda1..9500b890 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -296,7 +296,7 @@ impl<'a> ServiceType<'a> { impl<'a> ToString for ServiceType<'a> { fn to_string(&self) -> String { - self.name().to_string() + self.name() } } @@ -311,7 +311,7 @@ where { let kubernetes = deployment_target.kubernetes; let environment = deployment_target.environment; - match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details.clone()) { + match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details) { Ok(lines) => lines, Err(err) => { logger.log( @@ -390,9 +390,9 @@ where tera_context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -440,7 +440,7 @@ where service.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e))?; crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry( @@ -558,12 +558,7 @@ where let 
helm_release_name = service.helm_release_name(); // clean the resource - let _ = helm_uninstall_release( - kubernetes, - environment, - helm_release_name.as_str(), - event_details.clone(), - )?; + let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str(), event_details)?; Ok(()) } @@ -602,9 +597,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_common_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -615,9 +610,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -626,12 +621,12 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), external_svc_dir.as_str(), - context.clone(), + context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + external_svc_dir, e, )); } @@ -665,9 +660,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -676,12 +671,12 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_values_dir(), workspace_dir.as_str(), - context.clone(), + context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_values_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -698,7 +693,7 @@ where // create a namespace with labels if it does not exist 
crate::cmd::kubectl::kubectl_exec_create_namespace( - kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, environment.namespace(), namespace_labels, kubernetes.cloud_provider().credentials_environment_variables(), @@ -726,7 +721,7 @@ where service.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e))?; // check app status @@ -741,7 +736,7 @@ where } return Err(EngineError::new_database_failed_to_start_after_several_retries( - event_details.clone(), + event_details, service.name_with_id(), service.service_type().name(), match is_pod_ready { @@ -775,9 +770,9 @@ where tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_common_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -788,9 +783,9 @@ where tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -798,13 +793,13 @@ where let external_svc_dir = format!("{}/{}", workspace_dir, "external-name-svc"); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + &external_svc_dir, tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + external_svc_dir, e, )); } @@ -812,12 +807,12 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), workspace_dir.as_str(), - tera_context.clone(), + tera_context, ) { return 
Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -827,7 +822,7 @@ where logger.log( LogLevel::Info, EngineEvent::Deleting( - event_details.clone(), + event_details, EventMessage::new_from_safe("Deleting secret containing tfstates".to_string()), ), ); @@ -835,8 +830,7 @@ where delete_terraform_tfstate_secret(kubernetes, environment.namespace(), &get_tfstate_name(service)); } Err(e) => { - let engine_err = - EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details.clone(), e); + let engine_err = EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, e); logger.log(LogLevel::Error, EngineEvent::Error(engine_err.clone(), None)); @@ -847,12 +841,7 @@ where // If not managed, we use helm to deploy let helm_release_name = service.helm_release_name(); // clean the resource - let _ = helm_uninstall_release( - kubernetes, - environment, - helm_release_name.as_str(), - event_details.clone(), - )?; + let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str(), event_details)?; } Ok(()) @@ -925,10 +914,10 @@ where VersionsNumber::from_str(&service.version()).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), service.version(), e) })?, - VersionsNumber::from_str(&version.to_string()).map_err(|e| { + VersionsNumber::from_str(&version).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), version.to_string(), e) })?, - Some(message.to_string()), + Some(message), )); } @@ -936,7 +925,7 @@ where VersionsNumber::from_str(&service.version()).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), service.version(), e) })?, - VersionsNumber::from_str(&version.to_string()).map_err(|e| { + VersionsNumber::from_str(&version).map_err(|e| { 
EngineError::new_version_number_parsing_error(event_details.clone(), version.to_string(), e) })?, None, @@ -959,7 +948,7 @@ where listeners_helper.deployment_error(progress_info); let error = EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, service.service_type().name(), service.version(), ); @@ -1028,21 +1017,21 @@ where listeners_helper.deployment_in_progress(progress_info); logger.log( LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(message.to_string())), + EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(message)), ); } CheckAction::Pause => { listeners_helper.pause_in_progress(progress_info); logger.log( LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message.to_string())), + EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), ); } CheckAction::Delete => { listeners_helper.delete_in_progress(progress_info); logger.log( LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), ); } } @@ -1107,10 +1096,10 @@ where CheckAction::Delete => listeners_helper.delete_error(progress_info), } - return Err(EngineError::new_k8s_service_issue( - event_details.clone(), + Err(EngineError::new_k8s_service_issue( + event_details, CommandError::new(err.message(), Some("Error with Kubernetes service".to_string())), - )); + )) } _ => { let progress_info = ProgressInfo::new( @@ -1156,7 +1145,7 @@ where // get logs let logs = crate::cmd::kubectl::kubectl_exec_logs( - kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, environment.namespace(), selector.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), @@ -1174,7 +1163,7 @@ where // get pod state let pods = crate::cmd::kubectl::kubectl_exec_get_pods( - 
kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, Some(environment.namespace()), Some(selector.as_str()), kubernetes.cloud_provider().credentials_environment_variables(), @@ -1253,7 +1242,7 @@ pub fn helm_uninstall_release( .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; let chart = ChartInfo::new_from_release_name(helm_release_name, environment.namespace()); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } @@ -1298,9 +1287,7 @@ where S: Service + Listen, F: Fn() -> R, { - let event_details = service - .get_event_details(Stage::Environment(EnvironmentStep::Deploy)) - .clone(); + let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); let logger = service.logger().clone_dyn(); let listeners = std::clone::Clone::clone(service.listeners()); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 97a74f99..ccc5c432 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -167,10 +167,7 @@ pub fn generate_supported_version( if minor_min == minor_max { // add short minor format targeting latest version - supported_versions.insert( - format!("{}.{}", major.to_string(), minor_max.to_string()), - latest_major_version.clone(), - ); + supported_versions.insert(format!("{}.{}", major, minor_max), latest_major_version.clone()); if update_min.unwrap() == update_max.unwrap() { let version = format!("{}.{}.{}", major, minor_min, update_min.unwrap()); supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); @@ -184,13 +181,8 @@ pub fn generate_supported_version( for minor in minor_min..minor_max + 1 { // add short minor format targeting latest version supported_versions.insert( - format!("{}.{}", major.to_string(), minor.to_string()), - format!( - "{}.{}.{}", - major.to_string(), - minor.to_string(), - update_max.unwrap().to_string() - ), + 
format!("{}.{}", major, minor), + format!("{}.{}.{}", major, minor, update_max.unwrap()), ); if update_min.unwrap() == update_max.unwrap() { let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); @@ -286,7 +278,7 @@ impl FromStr for VersionsNumber { let major = match version_split.next() { Some(major) => { let major = major.to_string(); - major.replace("v", "") + major.replace('v', "") } None => { return Err(CommandError::new_from_safe_message(format!( @@ -298,7 +290,7 @@ impl FromStr for VersionsNumber { let minor = version_split.next().map(|minor| { let minor = minor.to_string(); - minor.replace("+", "") + minor.replace('+', "") }); let patch = version_split.next().map(|patch| patch.to_string()); @@ -539,12 +531,12 @@ pub fn check_domain_for( } pub fn sanitize_name(prefix: &str, name: &str) -> String { - format!("{}-{}", prefix, name).replace("_", "-") + format!("{}-{}", prefix, name).replace('_', "-") } pub fn managed_db_name_sanitizer(max_size: usize, prefix: &str, name: &str) -> String { let max_size = max_size - prefix.len(); - let mut new_name = format!("{}{}", prefix, name.replace("_", "").replace("-", "")); + let mut new_name = format!("{}{}", prefix, name.replace('_', "").replace('-', "")); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); } diff --git a/src/cmd/command.rs b/src/cmd/command.rs index d7f67e85..364c5488 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -344,14 +344,14 @@ mod tests { #[test] fn test_error() { - let mut cmd = QoveryCommand::new("false", &vec![], &vec![]); + let mut cmd = QoveryCommand::new("false", &[], &[]); assert_eq!(cmd.exec().is_err(), true); assert_eq!(matches!(cmd.exec(), Err(CommandError::ExitStatusError(_))), true); } #[test] fn test_command_with_timeout() { - let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); + let mut cmd = QoveryCommand::new("sleep", &["120"], &[]); let ret = cmd.exec_with_abort( &mut |_| {}, &mut |_| {}, @@ -360,7 +360,7 
@@ mod tests { assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); - let mut cmd = QoveryCommand::new("sh", &vec!["-c", "cat /dev/urandom | grep -a --null-data ."], &vec![]); + let mut cmd = QoveryCommand::new("sh", &["-c", "cat /dev/urandom | grep -a --null-data ."], &[]); let ret = cmd.exec_with_abort( &mut |_| {}, &mut |_| {}, @@ -369,7 +369,7 @@ mod tests { assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); - let mut cmd = QoveryCommand::new("sleep", &vec!["1"], &vec![]); + let mut cmd = QoveryCommand::new("sleep", &["1"], &[]); let ret = cmd.exec_with_abort( &mut |_| {}, &mut |_| {}, @@ -380,7 +380,7 @@ mod tests { #[test] fn test_command_with_abort() { - let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); + let mut cmd = QoveryCommand::new("sleep", &["120"], &[]); let should_kill = Arc::new(AtomicBool::new(false)); let should_kill2 = should_kill.clone(); let barrier = Arc::new(Barrier::new(2)); diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 0d1c7965..835878c0 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -83,15 +83,15 @@ impl Docker { let args = vec!["buildx", "version"]; let buildx_cmd_exist = docker_exec( &args, - &docker.get_all_envs(&vec![]), + &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never(), ); if let Err(_) = buildx_cmd_exist { - return Err(DockerError::InvalidConfig(format!( - "Docker buildx plugin for buildkit is not correctly installed" - ))); + return Err(DockerError::InvalidConfig( + "Docker buildx plugin for buildkit is not correctly installed".to_string(), + )); } // In order to be able to use --cache-from --cache-to for buildkit, @@ -108,7 +108,7 @@ impl Docker { ]; let _ = docker_exec( &args, - &docker.get_all_envs(&vec![]), + &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never(), @@ -130,7 +130,7 @@ impl Docker { pub fn login(&self, registry: &Url) -> Result<(), DockerError> { info!("Docker login {} as user {}", registry, 
registry.username()); - let password = urlencoding::decode(®istry.password().unwrap_or_default()) + let password = urlencoding::decode(registry.password().unwrap_or_default()) .unwrap_or_default() .to_string(); let args = vec![ @@ -144,7 +144,7 @@ impl Docker { docker_exec( &args, - &self.get_all_envs(&vec![]), + &self.get_all_envs(&[]), &mut |line| info!("{}", line), &mut |line| warn!("{}", line), &CommandKiller::never(), @@ -157,8 +157,8 @@ impl Docker { info!("Docker check locally image exist {:?}", image); let ret = docker_exec( - &vec!["image", "inspect", &image.image_name()], - &self.get_all_envs(&vec![]), + &["image", "inspect", &image.image_name()], + &self.get_all_envs(&[]), &mut |line| info!("{}", line), &mut |line| warn!("{}", line), &CommandKiller::never(), @@ -172,8 +172,8 @@ impl Docker { info!("Docker check remotely image exist {:?}", image); let ret = docker_exec( - &vec!["manifest", "inspect", &image.image_name()], - &self.get_all_envs(&vec![]), + &["manifest", "inspect", &image.image_name()], + &self.get_all_envs(&[]), &mut |line| info!("{}", line), &mut |line| warn!("{}", line), &CommandKiller::never(), @@ -200,8 +200,8 @@ impl Docker { info!("Docker pull {:?}", image); docker_exec( - &vec!["pull", &image.image_name()], - &self.get_all_envs(&vec![]), + &["pull", &image.image_name()], + &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort, @@ -319,7 +319,7 @@ impl Docker { let _ = docker_exec( &args_string.iter().map(|x| x.as_str()).collect::>(), - &self.get_all_envs(&vec![]), + &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort, @@ -384,7 +384,7 @@ impl Docker { docker_exec( &args_string.iter().map(|x| x.as_str()).collect::>(), - &self.get_all_envs(&vec![]), + &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort, @@ -409,7 +409,7 @@ impl Docker { docker_exec( &args, - &self.get_all_envs(&vec![]), + &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort, @@ -431,7 +431,7 @@ impl Docker { 
for prune in all_prunes_commands { let ret = docker_exec( &prune, - &self.get_all_envs(&vec![]), + &self.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never(), @@ -461,7 +461,7 @@ where X: FnMut(String), { let mut cmd = QoveryCommand::new("docker", args, envs); - let ret = cmd.exec_with_abort(stdout_output, stderr_output, &cmd_killer); + let ret = cmd.exec_with_abort(stdout_output, stderr_output, cmd_killer); match ret { Ok(_) => Ok(()), @@ -550,7 +550,7 @@ mod tests { Path::new("tests/docker/multi_stage_simple/Dockerfile"), Path::new("tests/docker/multi_stage_simple/"), &image_to_build, - &vec![], + &[], &image_cache, false, &mut |msg| println!("{}", msg), @@ -565,7 +565,7 @@ mod tests { Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), Path::new("tests/docker/multi_stage_simple/"), &image_to_build, - &vec![], + &[], &image_cache, false, &mut |msg| println!("{}", msg), @@ -597,7 +597,7 @@ mod tests { Path::new("tests/docker/multi_stage_simple/Dockerfile"), Path::new("tests/docker/multi_stage_simple/"), &image_to_build, - &vec![], + &[], &image_cache, false, &mut |msg| println!("{}", msg), @@ -611,7 +611,7 @@ mod tests { Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), Path::new("tests/docker/multi_stage_simple/"), &image_to_build, - &vec![], + &[], &image_cache, false, &mut |msg| println!("{}", msg), @@ -643,7 +643,7 @@ mod tests { Path::new("tests/docker/multi_stage_simple/Dockerfile"), Path::new("tests/docker/multi_stage_simple/"), &image_to_build, - &vec![], + &[], &image_cache, false, &mut |msg| println!("{}", msg), diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index 5e056975..a02bcc7e 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -524,7 +524,7 @@ impl Helm { self.get_chart_version(chart.name.clone(), Some(chart.get_namespace_string().as_str()), envs)? 
{ if installed_version.le(breaking_version) { - self.uninstall(&chart, envs)?; + self.uninstall(chart, envs)?; } } } @@ -579,7 +579,7 @@ mod tests { impl HelmTestCtx { fn cleanup(&self) { - let ret = self.helm.uninstall(&self.chart, &vec![]); + let ret = self.helm.uninstall(&self.chart, &[]); assert!(ret.is_ok()) } @@ -595,7 +595,7 @@ mod tests { ); let mut kube_config = dirs::home_dir().unwrap(); kube_config.push(".kube/config"); - let helm = Helm::new(kube_config.to_str().unwrap(), &vec![]).unwrap(); + let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); let cleanup = HelmTestCtx { helm, chart }; cleanup.cleanup(); @@ -612,19 +612,14 @@ mod tests { #[test] fn check_version() { let mut output = String::new(); - let _ = helm_exec_with_output( - &vec!["version"], - &vec![], - &mut |line| output.push_str(&line), - &mut |_line| {}, - ); + let _ = helm_exec_with_output(&["version"], &[], &mut |line| output.push_str(&line), &mut |_line| {}); assert!(output.contains("Version:\"v3.7.2\"")); } #[test] fn test_release_exist() { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-release-exist"); - let ret = helm.check_release_exist(chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)) } @@ -638,19 +633,19 @@ mod tests { chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); // no existing namespace should return an empty array - let ret = helm.list_release(Some("tsdfsfsdf"), &vec![]); + let ret = helm.list_release(Some("tsdfsfsdf"), &[]); assert!(matches!(ret, Ok(vec) if vec.is_empty())); // install something - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // We should have at least one release in all the release - let ret = helm.list_release(None, &vec![]); + let ret = helm.list_release(None, &[]); assert!(matches!(ret, Ok(vec) if !vec.is_empty())); // We should 
have at least one release in all the release - let ret = helm.list_release(Some(&chart.get_namespace_string()), &vec![]); + let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); assert!(matches!(ret, Ok(vec) if vec.len() == 1)); // Install a second stuff @@ -659,10 +654,10 @@ mod tests { ref mut chart, } = HelmTestCtx::new("test-list-release-2"); chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); - let ret = helm.list_release(Some(&chart.get_namespace_string()), &vec![]); + let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); assert!(matches!(ret, Ok(vec) if vec.len() == 2)); } @@ -670,7 +665,7 @@ mod tests { fn test_upgrade_diff() { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-diff"); - let ret = helm.upgrade_diff(&chart, &vec![]); + let ret = helm.upgrade_diff(chart, &[]); assert!(matches!(ret, Ok(()))); } @@ -679,23 +674,23 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-rollback"); // check release does not exist yet - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // First revision cannot be rollback - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, Err(HelmError::CannotRollback(_)))); // 2nd upgrade - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Rollback should be ok now - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, Ok(()))); } @@ -704,15 +699,15 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = 
HelmTestCtx::new("test-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(_))); } @@ -725,15 +720,15 @@ mod tests { chart.timeout_in_seconds = 1; // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Err(HelmError::Timeout(_, _, _)))); // Release should not exist if it fails - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); } @@ -743,7 +738,7 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-with-lock-install"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // Spawn our task killer @@ -754,26 +749,26 @@ mod tests { move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); - let mut cmd = QoveryCommand::new("pkill", &vec!["-9", "-f", &format!("helm.*{}", chart_name)], &vec![]); + let mut cmd = QoveryCommand::new("pkill", &["-9", "-f", &format!("helm.*{}", chart_name)], &[]); let _ = cmd.exec(); } }); // install it barrier.wait(); - let ret = 
helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if release.is_locked())); // New installation should work even if a lock is present - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if !release.is_locked())); } @@ -786,11 +781,11 @@ mod tests { } = HelmTestCtx::new("test-upgrade-with-lock-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // First install - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Spawn our task killer @@ -801,7 +796,7 @@ mod tests { move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); - let mut cmd = QoveryCommand::new("pkill", &vec!["-9", "-f", &format!("helm.*{}", chart_name)], &vec![]); + let mut cmd = QoveryCommand::new("pkill", &["-9", "-f", &format!("helm.*{}", chart_name)], &[]); let _ = cmd.exec(); } }); @@ -811,19 +806,19 @@ mod tests { value: "6".to_string(), }]; barrier.wait(); - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if release.is_locked() && release.version == 2)); // New installation should work even if a lock is present - let ret = helm.upgrade(&chart, &vec![]); + let 
ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if !release.is_locked() && release.version == 4)); } @@ -832,27 +827,27 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-uninstall"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // deleting something that does not exist should not be an issue - let ret = helm.uninstall(&chart, &vec![]); + let ret = helm.uninstall(chart, &[]); assert!(matches!(ret, Ok(()))); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(_))); // Delete it - let ret = helm.uninstall(&chart, &vec![]); + let ret = helm.uninstall(chart, &[]); assert!(matches!(ret, Ok(()))); // check release does not exist anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); } @@ -862,8 +857,8 @@ mod tests { ref helm, ref mut chart, } = HelmTestCtx::new("test-version-release"); - let _ = helm.upgrade(&chart, &vec![]); - let releases = helm.list_release(Some(&chart.get_namespace_string()), &vec![]).unwrap(); + let _ = helm.upgrade(chart, &[]); + let releases = helm.list_release(Some(&chart.get_namespace_string()), &[]).unwrap(); assert_eq!(releases[0].clone().version.unwrap(), Version::new(0, 1, 0)) } } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index ae9f0303..ad1f998d 100644 --- 
a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -123,7 +123,7 @@ where cmd_args.into_iter().map(|a| a.to_string()).collect(), envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect(), Some(output_string.to_string()), - Some(err_output_string.to_string()), + Some(err_output_string), )), } } @@ -166,11 +166,7 @@ where } Ok(Some( - result - .metadata - .annotations - .kubernetes_digitalocean_com_load_balancer_id - .clone(), + result.metadata.annotations.kubernetes_digitalocean_com_load_balancer_id, )) } Err(e) => Err(e), @@ -527,7 +523,7 @@ pub fn kubectl_exec_delete_namespace

( where P: AsRef, { - if does_contain_terraform_tfstate(&kubernetes_config, &namespace, &envs)? { + if does_contain_terraform_tfstate(&kubernetes_config, namespace, &envs)? { return Err(CommandError::new_from_safe_message( "Namespace contains terraform tfstates in secret, can't delete it !".to_string(), )); @@ -789,7 +785,7 @@ where P: AsRef, { kubectl_exec::( - vec!["get", "configmap", "-o", "json", "-n", namespace, &name], + vec!["get", "configmap", "-o", "json", "-n", namespace, name], kubernetes_config, envs, ) @@ -845,7 +841,7 @@ where P: AsRef, { let result = kubectl_exec::>( - vec!["delete", &object.to_string(), "--all-namespaces", "--all"], + vec!["delete", object, "--all-namespaces", "--all"], kubernetes_config, envs, ); @@ -857,7 +853,7 @@ where if lower_case_message.contains("no resources found") || lower_case_message.ends_with(" deleted") { return Ok(()); } - return Err(e); + Err(e) } } } @@ -972,7 +968,7 @@ where "scale", "--replicas", &replicas_count.to_string(), - &kind_formatted, + kind_formatted, "--selector", selector, ], @@ -1128,7 +1124,7 @@ where .container_statuses .as_ref() .expect("Cannot get container statuses") - .into_iter() + .iter() .any(|e| { e.state.waiting.as_ref().is_some() && e.state.waiting.as_ref().expect("cannot get container state").reason == KubernetesPodStatusReason::CrashLoopBackOff // check 1 diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index 44f1a220..7d9fa05c 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -70,8 +70,8 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> { match result { Ok(_) => Ok(()), - Err(Operation { error, .. }) => return Err(error), - Err(retry::Error::Internal(e)) => return Err(CommandError::new(e, None)), + Err(Operation { error, .. 
}) => Err(error), + Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), } } @@ -195,7 +195,7 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co let mut cmd = QoveryCommand::new( "terraform", &args, - &vec![(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())], + &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())], ); cmd.set_current_dir(root_dir); diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 9577b0e9..e369495a 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -32,8 +32,8 @@ impl DOCR { let registry_name = name.to_string(); let registry_name2 = name.to_string(); let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); - let _ = registry.set_username(&api_key); - let _ = registry.set_password(Some(&api_key)); + let _ = registry.set_username(api_key); + let _ = registry.set_password(Some(api_key)); let registry_info = ContainerRegistryInfo { endpoint: registry, @@ -99,7 +99,7 @@ impl DOCR { raw_error_message: format!( "Failed to create DOCR repository `{}`, error: {}.", registry_name.as_str(), - e.to_string(), + e, ), }); } @@ -111,7 +111,7 @@ impl DOCR { raw_error_message: format!( "Failed to create DOCR repository `{}`, error: {}.", registry_name.as_str(), - e.to_string(), + e, ), }); } @@ -141,7 +141,7 @@ impl DOCR { Err(e) => { return Err(ContainerRegistryError::CannotDeleteRegistry { registry_name: "default".to_string(), - raw_error_message: format!("No response from the Digital Ocean API, error: {}", e.to_string()), + raw_error_message: format!("No response from the Digital Ocean API, error: {}", e), }); } } @@ -150,8 +150,8 @@ impl DOCR { pub fn exec_docr_login(&self) -> Result<(), ContainerRegistryError> { let mut cmd = QoveryCommand::new( "doctl", - &vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], - &vec![], + &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], + 
&[], ); match cmd.exec() { @@ -184,7 +184,7 @@ impl ContainerRegistry for DOCR { fn create_registry(&self) -> Result<(), ContainerRegistryError> { // Digital Ocean only allow one registry per account... - if let Err(_) = get_current_registry_name(self.api_key.as_str()) { + if get_current_registry_name(self.api_key.as_str()).is_err() { let _ = self.create_registry(self.name())?; } @@ -282,17 +282,14 @@ pub fn subscribe_kube_cluster_to_container_registry( Err(e) => Err(ContainerRegistryError::CannotLinkRegistryToCluster { registry_name: "default".to_string(), cluster_id: cluster_uuid.to_string(), - raw_error_message: format!("Unable to call Digital Ocean when tyring to subscribe repository to cluster, error: {}", e.to_string()), + raw_error_message: format!("Unable to call Digital Ocean when tyring to subscribe repository to cluster, error: {}", e), }), } } Err(e) => Err(ContainerRegistryError::CannotLinkRegistryToCluster { registry_name: "default".to_string(), cluster_id: cluster_uuid.to_string(), - raw_error_message: format!( - "Unable to Serialize digital ocean cluster uuids, error: {}", - e.to_string() - ), + raw_error_message: format!("Unable to Serialize digital ocean cluster uuids, error: {}", e), }), }; } @@ -316,7 +313,7 @@ pub fn get_current_registry_name(api_key: &str) -> Result Result Option { let mut dir = DescribeImagesRequest::default(); - dir.repository_name = image.name().to_string(); + dir.repository_name = image.name(); let mut image_identifier = ImageIdentifier::default(); image_identifier.image_tag = Some(image.tag.to_string()); @@ -183,7 +183,7 @@ impl ECR { return Err(ContainerRegistryError::CannotCreateRepository { registry_name: self.name.to_string(), repository_name: repository_name.to_string(), - raw_error_message: e.to_string(), + raw_error_message: e, }) } }; diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 898fdb35..fd3849ca 100644 --- 
a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -102,11 +102,7 @@ impl ScalewayCR { // We consider every registry namespace names are unique if let Some(registries) = scaleway_registry_namespaces { - if let Some(registry) = registries - .into_iter() - .filter(|r| r.status == Some(Status::Ready)) - .next() - { + if let Some(registry) = registries.into_iter().find(|r| r.status == Some(Status::Ready)) { return Some(registry); } } @@ -223,13 +219,11 @@ impl ScalewayCR { registry_to_delete.id.unwrap().as_str(), )) { Ok(res) => Ok(res), - Err(e) => { - return Err(ContainerRegistryError::CannotDeleteRepository { - registry_name: self.name.to_string(), - repository_name: namespace_name.to_string(), - raw_error_message: e.to_string(), - }); - } + Err(e) => Err(ContainerRegistryError::CannotDeleteRepository { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + raw_error_message: e.to_string(), + }), } } @@ -294,7 +288,7 @@ impl ContainerRegistry for ScalewayCR { fn does_image_exists(&self, image: &Image) -> bool { let image = docker::ContainerImage { registry: self.registry_info.endpoint.clone(), - name: image.name().clone(), + name: image.name(), tags: vec![image.tag.clone()], }; match self.context.docker.does_image_exist_remotely(&image) { diff --git a/src/errors/io.rs b/src/errors/io.rs index 9fb29a88..eddd9621 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -247,10 +247,7 @@ impl From for EngineError { event_details: EventDetails::from(error.event_details), qovery_log_message: error.qovery_log_message, user_log_message: error.user_log_message, - message: match error.message { - Some(msg) => Some(CommandError::from(msg)), - None => None, - }, + message: error.message.map(CommandError::from), link: error.link.map(|url| url.to_string()), hint_message: error.hint_message, } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 5a2638c4..6e0fb834 100644 --- 
a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -84,8 +84,8 @@ impl CommandError { ) -> Self { let mut unsafe_message = format!( "{}\ncommand: {} {}\nenv: {}", - message.to_string(), - bin.to_string(), + message, + bin, cmd_args.join(" "), envs.iter() .map(|(k, v)| format!("{}={}", k, v)) @@ -432,7 +432,7 @@ impl EngineError { }, ), qovery_log_message: message.to_string(), - user_log_message: message.to_string(), + user_log_message: message, message: None, link: None, hint_message: None, @@ -494,7 +494,7 @@ impl EngineError { event_details, Tag::MissingRequiredEnvVariable, message.to_string(), - message.to_string(), + message, None, None, None, @@ -669,7 +669,7 @@ impl EngineError { Tag::CannotRetrieveClusterConfigFile, message.to_string(), message.to_string(), - Some(error_message.into()), + Some(error_message), None, None, ) @@ -779,7 +779,7 @@ impl EngineError { event_details, Tag::NotEnoughResourcesToDeployEnvironment, message.to_string(), - message.to_string(), + message, None, None, Some("Consider to add one more node or upgrade your nodes configuration. 
If not possible, pause or delete unused environments.".to_string()), @@ -809,7 +809,7 @@ impl EngineError { event_details, Tag::CannotUninstallHelmChart, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -858,7 +858,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sMasterVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -886,7 +886,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sRequestedUpgradeVersion, message.to_string(), - message.to_string(), + message, error_message, None, None, @@ -912,7 +912,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sKubeletWorkerVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -935,7 +935,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sKubeProxyVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -981,7 +981,7 @@ impl EngineError { event_details, Tag::K8sPodDisruptionBudgetInInvalidState, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1029,7 +1029,7 @@ impl EngineError { event_details, Tag::K8sCannotDeletePod, message.to_string(), - message.to_string(), + message, Some(raw_k8s_error), None, None, @@ -1102,7 +1102,7 @@ impl EngineError { event_details, Tag::K8sUpgradeDeployedVsRequestedVersionsInconsistency, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1134,7 +1134,7 @@ impl EngineError { event_details, Tag::K8sScaleReplicas, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1207,7 +1207,7 @@ impl EngineError { event_details, Tag::K8sGetLogs, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1232,7 +1232,7 @@ impl EngineError { event_details, Tag::K8sGetLogs, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1262,7 +1262,7 @@ impl EngineError { event_details, 
Tag::K8sDescribe, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1283,7 +1283,7 @@ impl EngineError { event_details, Tag::K8sHistory, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1308,7 +1308,7 @@ impl EngineError { event_details, Tag::K8sCannotCreateNamespace, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1338,7 +1338,7 @@ impl EngineError { event_details, Tag::K8sPodIsNotReady, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1366,7 +1366,7 @@ impl EngineError { event_details, Tag::K8sNodeIsNotReadyWithTheRequestedVersion, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1416,7 +1416,7 @@ impl EngineError { event_details, Tag::K8sValidateRequiredCPUandBurstableError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, Some("Please ensure your configuration is valid.".to_string()), @@ -1436,7 +1436,7 @@ impl EngineError { event_details, Tag::CannotFindRequiredBinary, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1464,7 +1464,7 @@ impl EngineError { event_details, Tag::SubnetsCountShouldBeEven, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1489,7 +1489,7 @@ impl EngineError { event_details, Tag::CannotGetOrCreateIamRole, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1519,7 +1519,7 @@ impl EngineError { event_details, Tag::CannotCopyFilesFromDirectoryToDirectory, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1566,7 +1566,7 @@ impl EngineError { event_details, Tag::TerraformCannotRemoveEntryOut, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1664,7 +1664,7 @@ impl EngineError { event_details, Tag::TerraformContextUnsupportedParameterValue, 
message.to_string(), - message.to_string(), + message, raw_error, None, None, @@ -1802,13 +1802,13 @@ impl EngineError { helm_chart: String, raw_error: CommandError, ) -> EngineError { - let message = format!("Error while uninstalling helm chart: `{}`.", helm_chart.to_string()); + let message = format!("Error while uninstalling helm chart: `{}`.", helm_chart); EngineError::new( event_details, Tag::HelmChartUninstallError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1831,15 +1831,14 @@ impl EngineError { ) -> EngineError { let message = format!( "Error while trying to get helm chart `{}` history in namespace `{}`.", - helm_chart.to_string(), - namespace + helm_chart, namespace ); EngineError::new( event_details, Tag::HelmHistoryError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1878,16 +1877,13 @@ impl EngineError { product_name: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to get supported versions for `{}`.", - product_name.to_string() - ); + let message = format!("Error while trying to get supported versions for `{}`.", product_name); EngineError::new( event_details, Tag::CannotGetSupportedVersions, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1906,17 +1902,13 @@ impl EngineError { product_name: String, version: String, ) -> EngineError { - let message = format!( - "Error, version `{}` is not supported for `{}`.", - version.to_string(), - product_name.to_string() - ); + let message = format!("Error, version `{}` is not supported for `{}`.", version, product_name); EngineError::new( event_details, Tag::UnsupportedVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1962,7 +1954,7 @@ impl EngineError { event_details, Tag::ClientServiceFailedToStart, message.to_string(), - message.to_string(), + message, None, None, Some("Ensure you can run it without 
issues with `qovery run` and check its logs from the web interface or the CLI with `qovery log`. \ @@ -1992,7 +1984,7 @@ impl EngineError { event_details, Tag::ClientServiceFailedToDeployBeforeStart, message.to_string(), - message.to_string(), + message, None, None, None, @@ -2022,7 +2014,7 @@ impl EngineError { event_details, Tag::DatabaseFailedToStartAfterSeveralRetries, message.to_string(), - message.to_string(), + message, raw_error, None, None, @@ -2088,7 +2080,7 @@ impl EngineError { event_details, Tag::VersionNumberParsingError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2190,7 +2182,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotFindAnyDockerfile, message.to_string(), - message.to_string(), + message, None, None, Some("Your Dockerfile is not present at the specified location, check your settings.".to_string()), @@ -2216,7 +2208,7 @@ impl EngineError { event_details, Tag::BuilderBuildpackInvalidLanguageFormat, message.to_string(), - message.to_string(), + message, None, None, Some("Expected format `builder[@version]`.".to_string()), @@ -2271,7 +2263,7 @@ impl EngineError { event_details, Tag::BuilderGetBuildError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2296,7 +2288,7 @@ impl EngineError { event_details, Tag::BuilderCloningRepositoryError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2363,7 +2355,7 @@ impl EngineError { event_details, Tag::DockerPushImageError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2393,7 +2385,7 @@ impl EngineError { event_details, Tag::DockerPullImageError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2418,7 +2410,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotReadDockerfile, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2443,7 +2435,7 @@ 
impl EngineError { event_details, Tag::BuilderDockerCannotExtractEnvVarsFromDockerfile, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2468,7 +2460,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotBuildContainerImage, message.to_string(), - message.to_string(), + message, Some(raw_error), None, Some("It looks like there is something wrong in your Dockerfile. Try building the application locally with `docker build --no-cache`.".to_string()), @@ -2498,7 +2490,7 @@ impl EngineError { event_details, Tag::ContainerRegistryRepositoryCreationError, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2526,7 +2518,7 @@ impl EngineError { event_details, Tag::ContainerRegistryRepositorySetLifecycleError, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2552,7 +2544,7 @@ impl EngineError { event_details, Tag::ContainerRegistryGetCredentialsError, message.to_string(), - message.to_string(), + message, None, None, None, @@ -2577,7 +2569,7 @@ impl EngineError { event_details, Tag::ContainerRegistryDeleteImageError, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2601,7 +2593,7 @@ impl EngineError { event_details, Tag::ContainerRegistryImageDoesntExist, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2628,7 +2620,7 @@ impl EngineError { event_details, Tag::ContainerRegistryImageUnreachableAfterPush, message.to_string(), - message.to_string(), + message, None, None, Some("Please try to redeploy in a few minutes.".to_string()), @@ -2652,7 +2644,7 @@ impl EngineError { event_details, Tag::ContainerRegistryRepositoryDoesntExist, message.to_string(), - message.to_string(), + message, raw_error, None, None, @@ -2677,7 +2669,7 @@ impl EngineError { event_details, Tag::ContainerRegistryDeleteRepositoryError, message.to_string(), - message.to_string(), + 
message, raw_error, None, None, @@ -2717,7 +2709,7 @@ impl EngineError { event_details, Tag::ObjectStorageInvalidBucketName, message.to_string(), - message.to_string(), + message, None, None, Some("Check your cloud provider documentation to know bucket naming rules.".to_string()), @@ -2736,16 +2728,13 @@ impl EngineError { bucket_name: String, raw_error: ObjectStorageError, ) -> EngineError { - let message = format!( - "Error, cannot create object storage bucket `{}`.", - bucket_name.to_string(), - ); + let message = format!("Error, cannot create object storage bucket `{}`.", bucket_name,); EngineError::new( event_details, Tag::ObjectStorageCannotCreateBucket, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2768,15 +2757,14 @@ impl EngineError { ) -> EngineError { let message = format!( "Error, cannot put file `{}` into object storage bucket `{}`.", - file_name.to_string(), - bucket_name.to_string(), + file_name, bucket_name, ); EngineError::new( event_details, Tag::ObjectStorageCannotPutFileIntoBucket, message.to_string(), - message.to_string(), + message, Some(raw_error.into()), None, None, @@ -2795,16 +2783,13 @@ impl EngineError { bucket_name: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to empty object storage bucket `{}`.", - bucket_name.to_string(), - ); + let message = format!("Error while trying to empty object storage bucket `{}`.", bucket_name,); EngineError::new( event_details, Tag::ObjectStorageCannotEmptyBucket, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2823,16 +2808,13 @@ impl EngineError { bucket_name: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to tag object storage bucket `{}`.", - bucket_name.to_string(), - ); + let message = format!("Error while trying to tag object storage bucket `{}`.", bucket_name,); EngineError::new( event_details, 
Tag::ObjectStorageCannotTagBucket, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2853,14 +2835,14 @@ impl EngineError { ) -> EngineError { let message = format!( "Error while trying to activate versioning for object storage bucket `{}`.", - bucket_name.to_string(), + bucket_name, ); EngineError::new( event_details, Tag::ObjectStorageCannotActivateBucketVersioning, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, diff --git a/src/events/io.rs b/src/events/io.rs index c9e21af8..503dc21d 100644 --- a/src/events/io.rs +++ b/src/events/io.rs @@ -79,10 +79,7 @@ impl From for EngineEvent { }, events::EngineEvent::Error(e, m) => EngineEvent::Error { error: EngineError::from(e), - message: match m { - Some(msg) => Some(EventMessage::from(msg)), - None => None, - }, + message: m.map(EventMessage::from), }, events::EngineEvent::Waiting(d, m) => EngineEvent::Waiting { details: EventDetails::from(d), @@ -308,10 +305,7 @@ pub struct EventDetails { impl From for EventDetails { fn from(details: events::EventDetails) -> Self { - let provider_kind = match details.provider_kind { - Some(kind) => Some(Kind::from(kind)), - None => None, - }; + let provider_kind = details.provider_kind.map(Kind::from); EventDetails { provider_kind, organisation_id: details.organisation_id.to_string(), diff --git a/src/events/mod.rs b/src/events/mod.rs index 2cf35408..02664ebb 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -132,11 +132,7 @@ impl EventMessage { EventMessageVerbosity::SafeOnly => self.safe_message.to_string(), EventMessageVerbosity::FullDetails => match &self.full_details { None => self.safe_message.to_string(), - Some(details) => format!( - "{} / Full details: {}", - self.safe_message.to_string(), - details.to_string() - ), + Some(details) => format!("{} / Full details: {}", self.safe_message, details), }, } } @@ -411,7 +407,7 @@ impl EventDetails { /// TODO(benjaminch): remove this dirty hack pub fn 
clone_changing_stage(event_details: EventDetails, stage: Stage) -> Self { - let mut event_details = event_details.clone(); + let mut event_details = event_details; event_details.stage = stage; event_details } diff --git a/src/fs.rs b/src/fs.rs index 1fa9eb8e..6d3fc428 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -176,7 +176,7 @@ mod tests { let root_dir_path = Path::new(root_dir.as_str()); let directories_to_create = vec![ - format!("{}", root_dir), + root_dir.to_string(), format!("{}/.terraform", root_dir), format!("{}/.terraform/dir-1", root_dir), format!("{}/dir-1", root_dir), @@ -255,7 +255,7 @@ mod tests { } // clean: - tmp_files.into_iter().for_each(|f| drop(f)); + tmp_files.into_iter().for_each(drop); tmp_dir.close().expect("error closing temporary directory"); } } diff --git a/src/git.rs b/src/git.rs index fbbb124e..e41a1010 100644 --- a/src/git.rs +++ b/src/git.rs @@ -14,7 +14,7 @@ fn authentication_callback<'a>( ) -> impl FnMut(&str, Option<&str>, CredentialType) -> Result + 'a { let mut current_credentials: (String, Vec<(CredentialType, Cred)>) = ("".into(), vec![]); - return move |remote_url, username_from_url, allowed_types| { + move |remote_url, username_from_url, allowed_types| { // If we have changed remote, reset our available auth methods if remote_url != current_credentials.0 { current_credentials = ( @@ -43,7 +43,7 @@ fn authentication_callback<'a>( return Ok(credential); } } - }; + } } fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, Error> { @@ -173,7 +173,7 @@ mod tests { /// Since tests are runs in parallel and eventually on the same node, it will avoid having directories collisions between tests running on the same node. 
pub fn new_with_random_suffix(base_path: String) -> Self { DirectoryForTests { - path: format!("{}_{}", base_path, Uuid::new_v4().to_string()), + path: format!("{}_{}", base_path, Uuid::new_v4()), } } diff --git a/src/logger.rs b/src/logger.rs index 1af91fec..2d96b8d3 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -133,7 +133,7 @@ mod tests { safe_message.to_string(), Some(raw_message.to_string()), )), - Some(link.clone()), + Some(link), Some(hint.to_string()), ), None, @@ -150,7 +150,7 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Infrastructure(InfrastructureStep::Create), - Transmitter::Kubernetes(cluster_id.to_string(), cluster_name.to_string()), + Transmitter::Kubernetes(cluster_id.to_string(), cluster_name), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), @@ -182,7 +182,7 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Environment(EnvironmentStep::Delete), - Transmitter::Application(app_id.to_string(), app_name.to_string()), + Transmitter::Application(app_id.to_string(), app_name), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), @@ -219,7 +219,7 @@ mod tests { tc.description ); assert!( - logs_contain(format!("execution_id=\"{}\"", execution_id.to_string()).as_str()), + logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), "{}", tc.description ); @@ -256,17 +256,17 @@ mod tests { ); assert!( - logs_contain(format!("stage=\"{}\"", details.stage().to_string()).as_str()), + logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), "{}", tc.description ); assert!( - logs_contain(format!("step=\"{}\"", details.stage().sub_step_name().to_string()).as_str()), + logs_contain(format!("step=\"{}\"", details.stage().sub_step_name()).as_str()), "{}", tc.description ); assert!( - logs_contain(format!("transmitter=\"{}\"", details.transmitter().to_string()).as_str()), + 
logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), "{}", tc.description ); diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 04786390..ae60c442 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -45,9 +45,9 @@ impl S3 { context, id, name, - access_key_id: access_key_id.to_string(), - secret_access_key: secret_access_key.to_string(), - region: region.clone(), + access_key_id, + secret_access_key, + region, bucket_versioning_activated, bucket_ttl_in_seconds, } @@ -59,7 +59,7 @@ impl S3 { fn get_s3_client(&self) -> S3Client { let region = RusotoRegion::from_str(&self.region.to_aws_format()) - .expect(format!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format()).as_str()); + .unwrap_or_else(|_| panic!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format())); let client = Client::new_with( self.get_credentials(), HttpClient::new().expect("unable to create new Http client"), @@ -191,11 +191,11 @@ impl ObjectStorage for S3 { tag_set: vec![ Tag { key: "CreationDate".to_string(), - value: format!("{}", creation_date.to_rfc3339()), + value: creation_date.to_rfc3339(), }, Tag { key: "Ttl".to_string(), - value: format!("{}", self.bucket_ttl_in_seconds.unwrap_or_else(|| 0).to_string()), + value: format!("{}", self.bucket_ttl_in_seconds.unwrap_or(0)), }, ], }, diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index 49348702..c82edc4e 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -76,7 +76,7 @@ impl ScalewayOS { } fn get_endpoint_url_for_region(&self) -> String { - format!("https://s3.{}.scw.cloud", self.zone.region().to_string()) + format!("https://s3.{}.scw.cloud", self.zone.region()) } fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { @@ -217,7 +217,7 @@ impl ObjectStorage for ScalewayOS { }, Tag { key: "Ttl".to_string(), 
- value: format!("Ttl={}", self.bucket_ttl_in_seconds.unwrap_or_else(|| 0).to_string()), + value: format!("Ttl={}", self.bucket_ttl_in_seconds.unwrap_or(0)), }, ], }, @@ -263,12 +263,10 @@ impl ObjectStorage for ScalewayOS { ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - return Err(ObjectStorageError::CannotDeleteBucket { - bucket_name: bucket_name.to_string(), - raw_error_message: e.to_string(), - }); - } + Err(e) => Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, BucketDeleteStrategy::Empty => Ok(()), // Do not delete the bucket } @@ -331,19 +329,15 @@ impl ObjectStorage for ScalewayOS { let file = File::open(path).unwrap(); Ok((file_path, file)) } - Err(e) => { - return Err(ObjectStorageError::CannotReadFile { - bucket_name: bucket_name.to_string(), - raw_error_message: e.to_string(), - }); - } - }, - Err(e) => { - return Err(ObjectStorageError::CannotOpenFile { + Err(e) => Err(ObjectStorageError::CannotReadFile { bucket_name: bucket_name.to_string(), raw_error_message: e.to_string(), - }); - } + }), + }, + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } Err(e) => Err(ObjectStorageError::CannotGetObjectFile { diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index d5bfb8be..5ad92541 100644 --- a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -329,7 +329,7 @@ impl ObjectStorage for Spaces { Error::Internal(err) => Err(ObjectStorageError::CannotGetObjectFile { bucket_name: bucket_name.to_string(), file_name: object_key.to_string(), - raw_error_message: err.to_string(), + raw_error_message: err, }), }; } diff --git a/src/transaction.rs b/src/transaction.rs index eb85ed60..32182280 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -260,9 +260,9 @@ impl<'a> Transaction<'a> { // FIXME: Cleanup this, qe_environment should not be rebuilt 
at this step fn rollback_environment(&self, environment: &Environment) -> Result<(), RollbackError> { let action = match environment.action { - Action::Create => self.engine.kubernetes().deploy_environment_error(&environment), - Action::Pause => self.engine.kubernetes().pause_environment_error(&environment), - Action::Delete => self.engine.kubernetes().delete_environment_error(&environment), + Action::Create => self.engine.kubernetes().deploy_environment_error(environment), + Action::Pause => self.engine.kubernetes().pause_environment_error(environment), + Action::Delete => self.engine.kubernetes().delete_environment_error(environment), Action::Nothing => Ok(()), }; @@ -498,7 +498,7 @@ impl<'a> Transaction<'a> { // Even by storing data at the micro seconds precision thread::sleep(std::time::Duration::from_millis(100)); - let _ = match action_fn(&environment) { + let _ = match action_fn(environment) { Err(err) => { let rollback_result = match self.rollback() { Ok(_) => TransactionResult::Rollback(err), diff --git a/src/unit_conversion.rs b/src/unit_conversion.rs index 33b64924..923f5bd8 100644 --- a/src/unit_conversion.rs +++ b/src/unit_conversion.rs @@ -19,7 +19,7 @@ pub fn cpu_string_to_float>(cpu: T) -> f32 { } // the result is in millis, so convert it to float - let cpu = cpu.replace("m", ""); + let cpu = cpu.replace('m', ""); match cpu.parse::() { Ok(v) if v >= 0.0 => v / 1000.0, _ => 0.0, diff --git a/src/utilities.rs b/src/utilities.rs index c4b002bd..d846d63c 100644 --- a/src/utilities.rs +++ b/src/utilities.rs @@ -59,14 +59,14 @@ mod tests_utilities { &"/".to_string(), &Some("Dockerfile".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); let image_tag_2 = compute_image_tag( &"/".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); 
assert_ne!(image_tag, image_tag_2); @@ -75,7 +75,7 @@ mod tests_utilities { &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_ne!(image_tag, image_tag_3); @@ -84,7 +84,7 @@ mod tests_utilities { &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_eq!(image_tag_3, image_tag_3_2); @@ -93,7 +93,7 @@ mod tests_utilities { &"/".to_string(), &None as &Option<&str>, &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); let mut env_vars_5 = BTreeMap::new(); @@ -103,7 +103,7 @@ mod tests_utilities { &"/".to_string(), &None as &Option<&str>, &env_vars_5, - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_eq!(image_tag_4, image_tag_5); diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 8a001db5..5e7a8d51 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -71,7 +71,7 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -128,20 +128,14 @@ fn deploy_an_environment_with_db_and_pause_it() { // Check that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods( - context.clone(), - Kind::Aws, - environment.clone(), - app_name.clone().as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Aws, environment, app_name.as_str(), secrets); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); 
let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -213,7 +207,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { let ret = environment_delete.delete_environment(&ea_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -330,13 +324,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY let database_name = format!("postgresql{}-0", &environment_check.databases[0].name); - match is_pod_restarted_env( - context.clone(), - Kind::Aws, - environment_check, - database_name.as_str(), - secrets.clone(), - ) { + match is_pod_restarted_env(context, Kind::Aws, environment_check, database_name.as_str(), secrets) { (true, _) => assert!(true), (false, _) => assert!(false), } @@ -347,7 +335,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - return test_name.to_string(); + test_name.to_string() }) } diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 325b6545..21ed7fca 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -70,7 +70,7 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -112,7 +112,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { ); let ea = environment.clone(); - let selector = format!("appId={}", environment.clone().applications[0].id); + let selector = format!("appId={}", environment.applications[0].id); let ret = 
environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -183,7 +183,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( - context.clone(), + context, Kind::Aws, environment.clone(), selector.as_str(), @@ -235,7 +235,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { let ret = environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -291,7 +291,7 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - return test_name.to_string(); + test_name.to_string() }) } @@ -362,7 +362,7 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -433,7 +433,7 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -485,7 +485,7 @@ fn deploy_a_working_environment_with_domain() { let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -553,7 +553,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { + match get_pvc(context, 
Kind::Aws, environment, secrets) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -564,7 +564,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -638,7 +638,7 @@ fn redeploy_same_app_with_ebs() { let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { + match get_pvc(context.clone(), Kind::Aws, environment, secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -651,26 +651,20 @@ fn redeploy_same_app_with_ebs() { context.clone(), Kind::Aws, environment_check1, - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); let ret = environment_redeploy.deploy_environment(&ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); - let (_, number2) = is_pod_restarted_env( - context.clone(), - Kind::Aws, - environment_check2, - app_name.as_str(), - secrets.clone(), - ); + let (_, number2) = is_pod_restarted_env(context, Kind::Aws, environment_check2, app_name.as_str(), secrets); //nothing change in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -747,7 +741,7 @@ fn deploy_a_not_working_environment_and_after_working_environment() { let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, 
TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -848,7 +842,7 @@ fn deploy_ok_fail_fail_ok_environment() { let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -900,7 +894,7 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -953,7 +947,7 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { // let time for nginx to reload the config thread::sleep(Duration::from_secs(10)); // checking if cookie is properly set on the app - assert!(routers_sessions_are_sticky(environment.routers.clone())); + assert!(routers_sessions_are_sticky(environment.routers)); let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); diff --git a/tests/aws/aws_s3.rs b/tests/aws/aws_s3.rs index bc25292a..1616762f 100644 --- a/tests/aws/aws_s3.rs +++ b/tests/aws/aws_s3.rs @@ -12,17 +12,17 @@ fn test_delete_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not 
to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, aws_region.clone(), @@ -34,7 +34,7 @@ fn test_delete_bucket() { aws_os .create_bucket(bucket_name.as_str()) - .expect(format!("error while creating S3 bucket in `{}`", aws_region.to_aws_format()).as_str()); + .unwrap_or_else(|_| panic!("error while creating S3 bucket in `{}`", aws_region.to_aws_format())); // compute: let result = aws_os.delete_bucket(bucket_name.as_str()); @@ -59,17 +59,17 @@ fn test_create_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, aws_region.clone(), @@ -111,20 +111,20 @@ fn test_recreate_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = 
AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); @@ -156,20 +156,20 @@ fn test_put_file() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); @@ -207,20 +207,20 @@ fn test_get_file() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - 
.expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index c0775af1..9dbf76d3 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -14,7 +14,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { let secrets = FuncTestsSecrets::new(); let region = secrets.AWS_DEFAULT_REGION.as_ref().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); + let aws_region = AwsRegion::from_str(region).expect("Wasn't able to convert the desired region"); let aws_zones = aws_region.get_zones(); let organization_id = generate_id(); @@ -32,7 +32,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = environment.clone(); + let env_action = environment; engine_run_test(|| { cluster_test( @@ -40,7 +40,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { Kind::Aws, context.clone(), logger(), - ®ion, + region, Some(aws_zones), ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 9da21447..5707d8a9 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -76,7 +76,7 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -123,7 +123,7 @@ fn 
deploy_an_environment_with_db_and_pause_it() { let env_action = environment.clone(); let env_action_delete = environment_delete.clone(); - let ret = environment.deploy_environment(&env_action.clone(), logger.clone(), &engine_config); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = environment.pause_environment(&env_action, logger.clone(), &engine_config); @@ -135,7 +135,7 @@ fn deploy_an_environment_with_db_and_pause_it() { context.clone(), ProviderKind::Do, environment.clone(), - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); @@ -145,11 +145,11 @@ fn deploy_an_environment_with_db_and_pause_it() { assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments(&context, vec![environment], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -229,7 +229,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -381,7 +381,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index d253bf3b..b3df0295 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -69,7 +69,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, 
TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -130,7 +130,7 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -197,7 +197,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { context.clone(), Kind::Do, environment.clone(), - selector.clone().as_str(), + selector.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); @@ -223,7 +223,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let ret = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -298,7 +298,7 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -353,7 +353,7 @@ fn 
digitalocean_doks_deploy_a_working_environment_with_domain() { let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -433,7 +433,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -522,7 +522,7 @@ fn digitalocean_doks_redeploy_same_app() { context.clone(), Kind::Do, environment_check1, - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); @@ -543,7 +543,7 @@ fn digitalocean_doks_redeploy_same_app() { let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -622,7 +622,7 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), 
DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -731,7 +731,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -786,7 +786,7 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -849,7 +849,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index e5bc231a..2ab58577 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -18,8 +18,8 @@ fn test_delete_bucket_hard_delete_strategy() { context, "test-fake".to_string(), "test-fake".to_string(), - 
secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -49,8 +49,8 @@ fn test_delete_bucket_empty_strategy() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::Empty, ); @@ -85,8 +85,8 @@ fn test_create_bucket() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -116,8 +116,8 @@ fn test_recreate_bucket() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -154,8 +154,8 @@ fn test_put_file() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -200,8 +200,8 @@ fn test_get_file() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - 
secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index f4e3f0dd..3a53d40c 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -28,7 +28,7 @@ fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = environment.clone(); + let env_action = environment; engine_run_test(|| { cluster_test( diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index aeadd344..0cce29ee 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -80,7 +80,7 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -141,7 +141,7 @@ fn deploy_an_environment_with_db_and_pause_it() { context.clone(), ProviderKind::Scw, environment.clone(), - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); @@ -152,11 +152,11 @@ fn deploy_an_environment_with_db_and_pause_it() { assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments(&context, vec![environment], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -231,7 +231,7 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { warn!("cannot clean environments, error: {:?}", e); } - return 
test_name.to_string(); + test_name.to_string() }) } @@ -386,7 +386,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 56168def..68114006 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -71,7 +71,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -134,7 +134,7 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -229,7 +229,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let result = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -306,7 +306,7 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { environment_for_delete.delete_environment(&env_action_for_delete, 
logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -363,7 +363,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -446,7 +446,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -526,20 +526,14 @@ fn deploy_a_working_environment_and_pause_it() { let result = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Scw, environment.clone(), selector.as_str(), secrets); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup let result = environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - return 
test_name.to_string(); + test_name.to_string() }) } @@ -577,7 +571,6 @@ fn scaleway_kapsule_redeploy_same_app() { let mut environment = test_utilities::common::working_minimal_environment( &context, secrets - .clone() .DEFAULT_TEST_DOMAIN .as_ref() .expect("DEFAULT_TEST_DOMAIN is not set in secrets") @@ -627,7 +620,7 @@ fn scaleway_kapsule_redeploy_same_app() { context.clone(), Kind::Scw, environment_check1, - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); @@ -648,7 +641,7 @@ fn scaleway_kapsule_redeploy_same_app() { let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -731,7 +724,7 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -843,7 +836,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -901,7 +894,7 @@ fn 
scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -964,7 +957,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index 899b4a19..2f6be0f9 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -26,7 +26,7 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = environment.clone(); + let env_action = environment; engine_run_test(|| { cluster_test( From 11f1867000dddc55e4bc2cef60cb9a357795382e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 13:37:54 +0100 Subject: [PATCH 60/85] Linter (#664) * Linter * Linter --- src/build_platform/local_docker.rs | 4 +- .../aws/kubernetes/helm_charts.rs | 8 +- src/cloud_provider/aws/kubernetes/mod.rs | 2 +- src/cloud_provider/aws/regions.rs | 92 +++---- .../digitalocean/kubernetes/doks_api.rs | 4 +- .../digitalocean/kubernetes/helm_charts.rs | 10 +- 
.../digitalocean/kubernetes/mod.rs | 256 +++++++++--------- .../digitalocean/network/vpc.rs | 4 +- src/cloud_provider/helm.rs | 14 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/scaleway/application.rs | 2 +- .../scaleway/kubernetes/helm_charts.rs | 10 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 6 +- src/cloud_provider/service.rs | 16 +- src/cloud_provider/utilities.rs | 38 +-- src/cmd/command.rs | 6 +- src/cmd/docker.rs | 2 +- src/cmd/kubectl.rs | 12 +- src/cmd/terraform.rs | 4 +- src/container_registry/ecr.rs | 6 +- src/error.rs | 4 +- src/errors/io.rs | 2 +- src/errors/mod.rs | 2 +- src/events/mod.rs | 2 + src/fs.rs | 2 +- src/git.rs | 2 +- src/object_storage/s3.rs | 2 +- src/object_storage/scaleway_object_storage.rs | 2 +- src/object_storage/spaces.rs | 2 +- src/template.rs | 5 +- src/transaction.rs | 10 +- 31 files changed, 267 insertions(+), 272 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index bc93ddac..4a1abdb5 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -1,3 +1,5 @@ +#![allow(clippy::redundant_closure)] + use std::io::{Error, ErrorKind}; use std::path::{Path, PathBuf}; use std::time::Duration; @@ -161,7 +163,7 @@ impl LocalDocker { tags: vec!["latest".to_string()], }; - // Check if the image does not exist already remotly, if yes, we skip the build + // Check if the image does not exist already remotely, if yes, we skip the build let image_name = image_to_build.image_name(); log_info(format!("🕵️ Checking if image already exist remotely {}", image_name)); if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 8c236279..9024cc79 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -91,9 +91,9 @@ pub fn aws_helm_charts( }; let 
prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -665,9 +665,9 @@ datasources: ", prometheus_internal_url, &loki.chart_info.name, - loki_namespace.to_string(), + loki_namespace, &loki.chart_info.name, - loki_namespace.to_string(), + loki_namespace, chart_config_prerequisites.region.clone(), qovery_terraform_config.aws_iam_cloudwatch_key, qovery_terraform_config.aws_iam_cloudwatch_secret, diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 502bf5e1..0a2d90ff 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -255,7 +255,7 @@ impl EKS { &self, event_details: EventDetails, zone_name: &str, - subnet_block: &Vec, + subnet_block: &[String], ) -> Result { if subnet_block.len() % 2 == 1 { return Err(EngineError::new_subnets_count_is_not_even( diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index 907a37f5..3ede17b0 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -102,7 +102,7 @@ pub enum AwsZones { impl ToTerraformString for AwsZones { fn to_terraform_format_string(&self) -> String { - format!("\"{}\"", self.to_string()) + format!("\"{}\"", self) } } @@ -171,11 +171,6 @@ impl AwsRegion { self } - pub fn to_string(&self) -> String { - let enum_name = format!("{}", self); - enum_name - } - pub fn to_aws_format(&self) -> String { match self { AwsRegion::UsEast1 => "us-east-1", @@ -303,8 +298,46 @@ impl Display for 
RegionAndZoneErrors { } impl AwsZones { - pub fn to_string(&self) -> String { - match self { + pub fn from_string(zone: String) -> Result { + // create tmp region from zone and get zone name (one letter) + let sanitized_zone_name = zone.to_lowercase().replace('-', "").replace('_', ""); + let mut sanitized_region = sanitized_zone_name.clone(); + sanitized_region.pop(); + + // ensure the region exists + let region = match AwsRegion::from_str(&sanitized_region) { + Ok(x) => x, + Err(_) => return Err(RegionNotFound), + }; + if region.to_string().to_lowercase() != sanitized_region { + return Err(RegionNotFound); + }; + + // check if the zone is currently supported + for zone in region.get_zones() { + if zone.to_string().replace('-', "") == sanitized_zone_name { + return Ok(zone); + } + } + + Err(ZoneNotSupported) + } + + pub fn get_region(&self) -> String { + let zone = self.to_string(); + zone[0..zone.len() - 1].to_string() + } +} + +impl fmt::Display for AwsRegion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Display for AwsZones { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let str = match self { UsEast1A => "us-east-1a", UsEast1B => "us-east-1b", UsEast1C => "us-east-1c", @@ -371,50 +404,9 @@ impl AwsZones { SaEast1A => "sa-east-1a", SaEast1B => "sa-east-1b", SaEast1C => "sa-east-1c", - } - .to_string() - } - - pub fn from_string(zone: String) -> Result { - // create tmp region from zone and get zone name (one letter) - let sanitized_zone_name = zone.to_lowercase().replace('-', "").replace('_', ""); - let mut sanitized_region = sanitized_zone_name.clone(); - sanitized_region.pop(); - - // ensure the region exists - let region = match AwsRegion::from_str(&sanitized_region) { - Ok(x) => x, - Err(_) => return Err(RegionNotFound), - }; - if region.to_string().to_lowercase() != sanitized_region { - return Err(RegionNotFound); }; - // check if the zone is currently supported - for zone in 
region.get_zones() { - if zone.to_string().replace('-', "") == sanitized_zone_name { - return Ok(zone); - } - } - - Err(ZoneNotSupported) - } - - pub fn get_region(&self) -> String { - let zone = self.to_string(); - zone[0..zone.len() - 1].to_string() - } -} - -impl fmt::Display for AwsRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl fmt::Display for AwsZones { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) + write!(f, "{}", str) } } diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index ba570214..3f393de8 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -60,7 +60,7 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result, + doks_versions: &[KubernetesVersion], wished_version: &str, ) -> Result, CommandError> { let wished_k8s_version = VersionsNumber::from_str(wished_version)?; @@ -76,7 +76,7 @@ fn get_do_kubernetes_latest_slug_version( Err(CommandError::new_from_safe_message(format!( "DOKS version `{}` is not supported.", - wished_k8s_version.to_string() + wished_k8s_version ))) } diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index bf8b7f63..44d789e3 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -145,9 +145,9 @@ pub fn do_helm_charts( }; let prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", 
loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -505,11 +505,7 @@ datasources: type: loki url: \"http://{}.{}.svc:3100\" ", - prometheus_internal_url, - &loki.chart_info.name, - loki_namespace.to_string(), - &loki.chart_info.name, - loki_namespace.to_string(), + prometheus_internal_url, &loki.chart_info.name, loki_namespace, &loki.chart_info.name, loki_namespace, ); let grafana = CommonChart { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 4296c6ba..7682ba7f 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -1306,6 +1306,134 @@ impl Kubernetes for DOKS { Ok(()) } + fn get_kubeconfig_file(&self) -> Result<(String, File), EngineError> { + let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); + let bucket_name = format!("qovery-kubeconfigs-{}", self.id()); + let object_key = self.get_kubeconfig_filename(); + let stage = Stage::General(GeneralStep::RetrieveClusterConfig); + + // check if kubeconfig locally exists + let local_kubeconfig = match self.get_temp_dir(event_details.clone()) { + Ok(x) => { + let local_kubeconfig_folder_path = format!("{}/{}", &x, &bucket_name); + let local_kubeconfig_generated = format!("{}/{}", &local_kubeconfig_folder_path, &object_key); + if Path::new(&local_kubeconfig_generated).exists() { + match File::open(&local_kubeconfig_generated) { + Ok(_) => Some(local_kubeconfig_generated), + Err(err) => { + self.logger().log( + LogLevel::Debug, + EngineEvent::Debug( + self.get_event_details(stage), + EventMessage::new( + err.to_string(), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), + ), + ), + ); + None + } + } + } else { + None + } + } + Err(_) => None, + }; + + // otherwise, try to get it from digital ocean api + 
let result = match local_kubeconfig { + Some(local_kubeconfig_generated) => match File::open(&local_kubeconfig_generated) { + Ok(file) => Ok((StringPath::from(&local_kubeconfig_generated), file)), + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + None => { + let kubeconfig = match get_do_kubeconfig_by_cluster_name( + self.cloud_provider.token(), + self.doks_cluster_name().as_str(), + ) { + Ok(kubeconfig) => match kubeconfig { + None => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new_from_safe_message("Kubeconfig is empty".to_string()), + )) + } + Some(content) => content, + }, + Err(e) => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new(e.message(), Some(e.message())), + )) + } + }; + + let workspace_directory = crate::fs::workspace_directory( + self.context().workspace_root_dir(), + self.context().execution_id(), + format!("object-storage/spaces/{}", self.name()), + ) + .map_err(|err| { + EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(err.to_string(), Some(err.to_string())), + ) + }) + .expect("Unable to create directory"); + + let file_path = format!( + "{}/qovery-kubeconfigs-{}/{}.yaml", + workspace_directory, + self.id(), + self.id() + ); + let path = Path::new(file_path.as_str()); + let parent_dir = path.parent().unwrap(); + let _ = block_on(tokio::fs::create_dir_all(parent_dir)); + + match block_on( + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path), + ) { + Ok(mut created_file) => match block_on(created_file.write_all(kubeconfig.as_bytes())) { + Ok(_) => { + let file = File::open(path).unwrap(); + Ok((file_path, file)) + } + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + 
CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + Err(e) => Err(EngineError::new_cannot_create_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + } + } + }; + + match result { + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new(e.message(), Some(e.message())), + )), + Ok((file_path, file)) => Ok((file_path, file)), + } + } + + fn get_kubeconfig_file_path(&self) -> Result { + let (path, _) = self.get_kubeconfig_file()?; + Ok(path) + } + #[named] fn on_create(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); @@ -1681,134 +1809,6 @@ impl Kubernetes for DOKS { ); Ok(()) } - - fn get_kubeconfig_file_path(&self) -> Result { - let (path, _) = self.get_kubeconfig_file()?; - Ok(path) - } - - fn get_kubeconfig_file(&self) -> Result<(String, File), EngineError> { - let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); - let bucket_name = format!("qovery-kubeconfigs-{}", self.id()); - let object_key = self.get_kubeconfig_filename(); - let stage = Stage::General(GeneralStep::RetrieveClusterConfig); - - // check if kubeconfig locally exists - let local_kubeconfig = match self.get_temp_dir(event_details.clone()) { - Ok(x) => { - let local_kubeconfig_folder_path = format!("{}/{}", &x, &bucket_name); - let local_kubeconfig_generated = format!("{}/{}", &local_kubeconfig_folder_path, &object_key); - if Path::new(&local_kubeconfig_generated).exists() { - match File::open(&local_kubeconfig_generated) { - Ok(_) => Some(local_kubeconfig_generated), - Err(err) => { - self.logger().log( - LogLevel::Debug, - EngineEvent::Debug( - self.get_event_details(stage), - EventMessage::new( - err.to_string(), - Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), - ), - ), - ); - None - } - } - } else { - None - } - } - Err(_) => None, - 
}; - - // otherwise, try to get it from digital ocean api - let result = match local_kubeconfig { - Some(local_kubeconfig_generated) => match File::open(&local_kubeconfig_generated) { - Ok(file) => Ok((StringPath::from(&local_kubeconfig_generated), file)), - Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), - )), - }, - None => { - let kubeconfig = match get_do_kubeconfig_by_cluster_name( - self.cloud_provider.token(), - self.doks_cluster_name().as_str(), - ) { - Ok(kubeconfig) => match kubeconfig { - None => { - return Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details, - CommandError::new_from_safe_message("Kubeconfig is empty".to_string()), - )) - } - Some(content) => content, - }, - Err(e) => { - return Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details, - CommandError::new(e.message(), Some(e.message())), - )) - } - }; - - let workspace_directory = crate::fs::workspace_directory( - self.context().workspace_root_dir(), - self.context().execution_id(), - format!("object-storage/spaces/{}", self.name()), - ) - .map_err(|err| { - EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), - CommandError::new(err.to_string(), Some(err.to_string())), - ) - }) - .expect("Unable to create directory"); - - let file_path = format!( - "{}/{}/{}", - workspace_directory, - format!("qovery-kubeconfigs-{}", self.id()), - format!("{}.yaml", self.id()) - ); - let path = Path::new(file_path.as_str()); - let parent_dir = path.parent().unwrap(); - let _ = block_on(tokio::fs::create_dir_all(parent_dir)); - - match block_on( - tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(path), - ) { - Ok(mut created_file) => match block_on(created_file.write_all(kubeconfig.as_bytes())) { - Ok(_) => { - let file = File::open(path).unwrap(); - Ok((file_path, file)) - } - Err(e) => 
Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), - )), - }, - Err(e) => Err(EngineError::new_cannot_create_file( - event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), - )), - } - } - }; - - match result { - Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details, - CommandError::new(e.message(), Some(e.message())), - )), - Ok((file_path, file)) => Ok((file_path, file)), - } - } } impl Listen for DOKS { diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index b890eb28..7994e5b7 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -15,8 +15,8 @@ pub enum VpcInitKind { impl ToString for VpcInitKind { fn to_string(&self) -> String { match self { - &VpcInitKind::Autodetect => "autodetect".to_string(), - &VpcInitKind::Manual => "manual".to_string(), + VpcInitKind::Autodetect => "autodetect".to_string(), + VpcInitKind::Manual => "manual".to_string(), } } } diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 2693886a..d8a397d1 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -11,6 +11,7 @@ use crate::errors::CommandError; use crate::utilities::calculate_hash; use semver::Version; use std::collections::HashMap; +use std::fmt::{Display, Formatter}; use std::path::Path; use std::{fs, thread}; use thread::spawn; @@ -35,9 +36,9 @@ pub enum HelmChartNamespaces { Custom, } -impl HelmChartNamespaces { - pub fn to_string(&self) -> String { - match self { +impl Display for HelmChartNamespaces { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let str = match self { HelmChartNamespaces::Custom => "custom", HelmChartNamespaces::KubeSystem => "kube-system", HelmChartNamespaces::Prometheus => "prometheus", @@ -45,8 +46,9 @@ impl HelmChartNamespaces { 
HelmChartNamespaces::CertManager => "cert-manager", HelmChartNamespaces::NginxIngress => "nginx-ingress", HelmChartNamespaces::Qovery => "qovery", - } - .to_string() + }; + + f.write_str(str) } } @@ -324,7 +326,7 @@ fn deploy_parallel_charts( pub fn deploy_charts_levels( kubernetes_config: &Path, - envs: &Vec<(String, String)>, + envs: &[(String, String)], charts: Vec>>, dry_run: bool, ) -> Result<(), CommandError> { diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 3487b50c..9cffdb6b 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -485,7 +485,7 @@ pub fn deploy_environment( kubernetes, service, event_details.clone(), - logger.clone(), + logger, &stateless_deployment_target, &listeners_helper, "check deployment", @@ -1212,12 +1212,12 @@ pub fn compare_kubernetes_cluster_versions_for_upgrade( messages.push("Older Kubernetes major version detected"); } - if &wished_minor_version > &deployed_minor_version { + if wished_minor_version > deployed_minor_version { upgrade_required.upgraded_required = true; messages.push("Kubernetes minor version change detected"); } - if &wished_minor_version < &deployed_minor_version { + if wished_minor_version < deployed_minor_version { upgrade_required.upgraded_required = false; upgrade_required.older_version_detected = true; messages.push("Older Kubernetes minor version detected"); @@ -1328,7 +1328,7 @@ where let listeners_helper = ListenersHelper::new(&listeners); let action = action; let progress_info = progress_info; - let waiting_message = waiting_message.unwrap_or("no message ...".to_string()); + let waiting_message = waiting_message.unwrap_or_else(|| "no message ...".to_string()); loop { // do notify users here diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index cd13f3cc..4e088c7a 100644 --- a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -288,7 +288,7 
@@ impl Service for ApplicationScw { .image .clone() .registry_docker_json_config - .unwrap_or("".to_string()) + .unwrap_or_default() .as_str(), ); diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index e010119d..7e99be2e 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -140,9 +140,9 @@ pub fn scw_helm_charts( }; let prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -454,11 +454,7 @@ datasources: type: loki url: \"http://{}.{}.svc:3100\" ", - prometheus_internal_url, - &loki.chart_info.name, - loki_namespace.to_string(), - &loki.chart_info.name, - loki_namespace.to_string(), + prometheus_internal_url, &loki.chart_info.name, loki_namespace, &loki.chart_info.name, loki_namespace, ); let grafana = CommonChart { diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 3b698794..274ca894 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -248,9 +248,9 @@ impl Kapsule { // if no cluster exists let cluster_info_content = cluster_info.clusters.unwrap(); - if &cluster_info_content.len() == &0_usize { + if cluster_info_content.is_empty() { return Ok(None); - } else if &cluster_info_content.len() != &1_usize { + } else if cluster_info_content.len() != 1_usize { let msg = format!( "too many clusters found with this name, where 1 was 
expected. {:?}", &cluster_info_content.len() @@ -304,7 +304,7 @@ impl Kapsule { let msg = format!( "No SCW pool found from the SCW API for cluster {}/{}", &cluster_id, - &cluster_info.name.unwrap_or("unknown cluster".to_string()) + &cluster_info.name.unwrap_or_else(|| "unknown cluster".to_string()) ); return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( msg.clone(), diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 9500b890..56b531f6 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -73,7 +73,7 @@ pub trait Service: ToTransmitter { fn min_instances(&self) -> u32; fn max_instances(&self) -> u32; fn publicly_accessible(&self) -> bool; - fn fqdn<'a>(&self, target: &DeploymentTarget, fqdn: &'a String, is_managed: bool) -> String { + fn fqdn(&self, target: &DeploymentTarget, fqdn: &str, is_managed: bool) -> String { match &self.publicly_accessible() { true => fqdn.to_string(), false => match is_managed { @@ -446,13 +446,13 @@ where crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry( kubernetes_config_file_path.as_str(), environment.namespace(), - service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ) .map_err(|e| { EngineError::new_k8s_pod_not_ready( event_details.clone(), - service.selector().unwrap_or("".to_string()), + service.selector().unwrap_or_default(), environment.namespace().to_string(), e, ) @@ -522,13 +522,13 @@ pub fn scale_down_application( kubernetes.cloud_provider().credentials_environment_variables(), environment.namespace(), scaling_kind, - service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), replicas_count as u32, ) .map_err(|e| { EngineError::new_k8s_scale_replicas( event_details.clone(), - service.selector().unwrap_or("".to_string()), + service.selector().unwrap_or_default(), 
environment.namespace().to_string(), replicas_count as u32, e, @@ -728,7 +728,7 @@ where let is_pod_ready = crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry( &kubernetes_config_file_path, environment.namespace(), - service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); if let Ok(Some(true)) = is_pod_ready { @@ -1139,7 +1139,7 @@ pub fn get_stateless_resource_information_for_user( where T: Service + ?Sized, { - let selector = service.selector().unwrap_or("".to_string()); + let selector = service.selector().unwrap_or_default(); let mut result = Vec::with_capacity(50); let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; @@ -1308,7 +1308,7 @@ where let listeners_helper = ListenersHelper::new(&listeners); let action = action; let progress_info = progress_info; - let waiting_message = waiting_message.clone().unwrap_or("No message...".to_string()); + let waiting_message = waiting_message.clone().unwrap_or_else(|| "No message...".to_string()); loop { // do notify users here diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index ccc5c432..cc0bf073 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -1,3 +1,5 @@ +#![allow(clippy::field_reassign_with_default)] + use std::collections::HashMap; use crate::errors::{CommandError, EngineError}; @@ -12,6 +14,7 @@ use retry::delay::Fixed; use retry::OperationResult; use serde::{Deserialize, Serialize}; use std::fmt; +use std::fmt::Write; use std::str::FromStr; use trust_dns_resolver::config::*; use trust_dns_resolver::proto::rr::{RData, RecordType}; @@ -232,22 +235,6 @@ impl VersionsNumber { } } - pub fn to_string(&self) -> String { - let mut version = vec![self.major.to_string()]; - - if self.minor.is_some() { - version.push(self.minor.clone().unwrap()) - } - if self.patch.is_some() { - 
version.push(self.patch.clone().unwrap()) - } - if self.suffix.is_some() { - version.push(self.suffix.clone().unwrap()) - } - - version.join(".") - } - pub fn to_major_version_string(&self) -> String { self.major.clone() } @@ -306,7 +293,24 @@ impl FromStr for VersionsNumber { impl fmt::Display for VersionsNumber { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.to_string()) + f.write_str(&self.major)?; + + if let Some(minor) = &self.minor { + f.write_char('.')?; + f.write_str(minor)?; + } + + if let Some(patch) = &self.patch { + f.write_char('.')?; + f.write_str(patch)?; + } + + if let Some(suffix) = &self.suffix { + f.write_char('.')?; + f.write_str(suffix)?; + } + + Ok(()) } } diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 364c5488..a49da2ec 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -339,14 +339,14 @@ mod tests { fn test_run_version_for_command() { let ret = run_version_command_for("ls"); assert_eq!(ret.is_empty(), false); - assert_eq!(ret.contains("GNU"), true) + assert!(ret.contains("GNU")) } #[test] fn test_error() { let mut cmd = QoveryCommand::new("false", &[], &[]); assert_eq!(cmd.exec().is_err(), true); - assert_eq!(matches!(cmd.exec(), Err(CommandError::ExitStatusError(_))), true); + assert!(matches!(cmd.exec(), Err(CommandError::ExitStatusError(_)))); } #[test] @@ -375,7 +375,7 @@ mod tests { &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2)), ); - assert_eq!(ret.is_ok(), true); + assert!(ret.is_ok()); } #[test] diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 835878c0..e55517bf 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -88,7 +88,7 @@ impl Docker { &mut |_| {}, &CommandKiller::never(), ); - if let Err(_) = buildx_cmd_exist { + if buildx_cmd_exist.is_err() { return Err(DockerError::InvalidConfig( "Docker buildx plugin for buildkit is not correctly installed".to_string(), )); diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index ad1f998d..4fafb7e4 
100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -399,8 +399,8 @@ where } // additional labels - if labels.is_some() { - match kubectl_add_labels_to_namespace(kubernetes_config, namespace, labels.unwrap(), envs) { + if let Some(..) = labels { + match kubectl_add_labels_to_namespace(kubernetes_config, namespace, labels.unwrap_or_default(), envs) { Ok(_) => {} Err(e) => return Err(e), } @@ -456,7 +456,7 @@ where pub fn does_contain_terraform_tfstate

( kubernetes_config: P, namespace: &str, - envs: &Vec<(&str, &str)>, + envs: &[(&str, &str)], ) -> Result where P: AsRef, @@ -671,18 +671,18 @@ pub fn kubectl_exec_rollout_restart_deployment

( kubernetes_config: P, name: &str, namespace: &str, - envs: &Vec<(&str, &str)>, + envs: &[(&str, &str)], ) -> Result<(), CommandError> where P: AsRef, { - let mut environment_variables: Vec<(&str, &str)> = envs.clone(); + let mut environment_variables: Vec<(&str, &str)> = envs.to_owned(); environment_variables.push(("KUBECONFIG", kubernetes_config.as_ref().to_str().unwrap())); let args = vec!["-n", namespace, "rollout", "restart", "deployment", name]; kubectl_exec_with_output( args, - environment_variables.clone(), + environment_variables, &mut |line| info!("{}", line), &mut |line| error!("{}", line), ) diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index 7d9fa05c..2c0a018c 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -9,7 +9,7 @@ use rand::Rng; use retry::Error::Operation; use std::{env, fs, thread, time}; -fn manage_common_issues(terraform_provider_lock: &String, err: &CommandError) -> Result<(), CommandError> { +fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Result<(), CommandError> { // Error: Failed to install provider from shared cache // in order to avoid lock errors on parallel run, let's sleep a bit // https://github.com/hashicorp/terraform/issues/28041 @@ -249,7 +249,7 @@ in the dependency lock file "#; let could_not_load_plugin_error = CommandError::new_from_safe_message(could_not_load_plugin.to_string()); - assert!(manage_common_issues(&"/tmp/do_not_exists".to_string(), &could_not_load_plugin_error).is_ok()); + assert!(manage_common_issues("/tmp/do_not_exists", &could_not_load_plugin_error).is_ok()); } #[test] diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 777f2213..6e6f3bd5 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -1,3 +1,5 @@ +#![allow(clippy::field_reassign_with_default)] + use std::str::FromStr; use rusoto_core::{Client, HttpClient, Region, RusotoError}; @@ -230,8 +232,8 @@ impl ECR { fn 
get_or_create_repository(&self, repository_name: &str) -> Result { // check if the repository already exists let repository = self.get_repository(repository_name); - if repository.is_some() { - return Ok(repository.unwrap()); + if let Some(repo) = repository { + return Ok(repo); } self.create_repository(repository_name) diff --git a/src/error.rs b/src/error.rs index a64601ab..81afaf4d 100644 --- a/src/error.rs +++ b/src/error.rs @@ -112,10 +112,10 @@ pub fn cast_simple_error_to_engine_error>( let message = match simple_error.kind { SimpleErrorKind::Command(exit_status) => format!( "{} ({})", - simple_error.message.unwrap_or("".into()), + simple_error.message.unwrap_or_else(|| "".into()), exit_status ), - SimpleErrorKind::Other => simple_error.message.unwrap_or("".into()), + SimpleErrorKind::Other => simple_error.message.unwrap_or_else(|| "".into()), }; Err(EngineError::new( diff --git a/src/errors/io.rs b/src/errors/io.rs index eddd9621..42501a16 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -12,7 +12,7 @@ pub struct CommandError { impl From for CommandError { fn from(error: errors::CommandError) -> Self { CommandError { - message: error.message_safe.unwrap_or("".to_string()), + message: error.message_safe.unwrap_or_default(), message_unsafe: error.message_raw, } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 6e0fb834..02ba96a3 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -407,7 +407,7 @@ impl EngineError { /// Creates new engine error from legacy engine error easing migration. 
pub fn new_from_legacy_engine_error(e: LegacyEngineError) -> Self { - let message = e.message.unwrap_or("".to_string()); + let message = e.message.unwrap_or_default(); EngineError { tag: Tag::Unknown, event_details: EventDetails::new( diff --git a/src/events/mod.rs b/src/events/mod.rs index 02664ebb..0ef04c6e 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::field_reassign_with_default)] +#![allow(clippy::large_enum_variant)] #![allow(deprecated)] pub mod io; diff --git a/src/fs.rs b/src/fs.rs index 6d3fc428..43cae13e 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -107,7 +107,7 @@ pub fn cleanup_workspace_directory(working_root_dir: &str, execution_id: &str) - return match crate::fs::root_workspace_directory(working_root_dir, execution_id) { Ok(workspace_dir) => match std::fs::remove_dir_all(match workspace_dir.strip_suffix("/.") { Some(striped_workspace_dir) => striped_workspace_dir, // Removing extra dir name allowing to delete directory properly ("/dir/." 
=> "dir") - None => workspace_dir.as_str().clone(), + None => &workspace_dir, }) { Ok(_) => Ok(()), Err(err) => { diff --git a/src/git.rs b/src/git.rs index e41a1010..bebf0d19 100644 --- a/src/git.rs +++ b/src/git.rs @@ -11,7 +11,7 @@ use url::Url; // or an error to specify that we have exhausted everything we are able to provide fn authentication_callback<'a>( get_credentials: &'a impl Fn(&str) -> Vec<(CredentialType, Cred)>, -) -> impl FnMut(&str, Option<&str>, CredentialType) -> Result + 'a { +) -> impl FnMut(&str, Option<&str>, CredentialType) -> Result + '_ { let mut current_credentials: (String, Vec<(CredentialType, Cred)>) = ("".into(), vec![]); move |remote_url, username_from_url, allowed_types| { diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index ae60c442..886d962f 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -324,7 +324,7 @@ impl ObjectStorage for S3 { match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { return Err(ObjectStorageError::CannotReadFile { diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index c82edc4e..664af26f 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -357,7 +357,7 @@ impl ObjectStorage for ScalewayOS { match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { return Err(ObjectStorageError::CannotReadFile { diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index 5ad92541..1785b29e 100644 --- 
a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -353,7 +353,7 @@ impl ObjectStorage for Spaces { match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { return Err(ObjectStorageError::CannotReadFile { diff --git a/src/template.rs b/src/template.rs index 6f86abf5..cd71541a 100644 --- a/src/template.rs +++ b/src/template.rs @@ -83,12 +83,11 @@ where .follow_links(true) .into_iter() .filter_map(|e| e.ok()) - .filter(|e| e.file_name().to_str().map(|s| s.contains(".j2.")).unwrap_or(false)) - .collect::>(); + .filter(|e| e.file_name().to_str().map(|s| s.contains(".j2.")).unwrap_or(false)); let mut results: Vec = vec![]; - for file in files.into_iter() { + for file in files { let path_str = file.path().to_str().unwrap(); let j2_path = path_str.replace(root_dir_str, ""); diff --git a/src/transaction.rs b/src/transaction.rs index 32182280..8e163229 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -222,19 +222,19 @@ impl<'a> Transaction<'a> { Step::CreateKubernetes => { // revert kubernetes creation if let Err(err) = self.engine.kubernetes().on_create_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::DeleteKubernetes => { // revert kubernetes deletion if let Err(err) = self.engine.kubernetes().on_delete_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::PauseKubernetes => { // revert pause if let Err(err) = self.engine.kubernetes().on_pause_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::BuildEnvironment(_environment_action, _option) => { @@ -268,7 +268,7 @@ impl<'a> Transaction<'a> { let _ = match 
action { Ok(_) => {} - Err(err) => return Err(RollbackError::CommitError(err)), + Err(err) => return Err(RollbackError::CommitError(Box::new(err))), }; Err(RollbackError::NoFailoverEnvironment) @@ -634,7 +634,7 @@ impl Clone for Step { #[derive(Debug)] pub enum RollbackError { - CommitError(EngineError), + CommitError(Box), NoFailoverEnvironment, Nothing, } From 4856c6a008c8dfe04a4a3816d02202a7708f05ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 13:38:54 +0100 Subject: [PATCH 61/85] Update tests.yml --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b699374e..3ebe3b8a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -36,7 +36,7 @@ jobs: sccache --show-stats echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) - RUSTFLAGS="--deny warnings" cargo check || (echo "Solve your warnings to continue"; exit 1) + RUSTFLAGS="--deny warnings" cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) echo "########## START BUILD ##########" cargo build --all-features sccache --show-stats From 078c98f752dd6b0627467b2e7822244a9e22e992 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 13:50:27 +0100 Subject: [PATCH 62/85] Update tests.yml --- .github/workflows/tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3ebe3b8a..698bc815 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -36,6 +36,7 @@ jobs: sccache --show-stats echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) + rustup component add clippy RUSTFLAGS="--deny warnings" cargo clippy --all 
--all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) echo "########## START BUILD ##########" cargo build --all-features From 1c053a00a945b8b4296a788934f102f9a5ba2924 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 13:51:36 +0100 Subject: [PATCH 63/85] Update tests.yml --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 698bc815..dbcecd2a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,7 +37,7 @@ jobs: echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) rustup component add clippy - RUSTFLAGS="--deny warnings" cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) + cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) echo "########## START BUILD ##########" cargo build --all-features sccache --show-stats From 33882f9b2825bccf29753a5dd25950214f60eda8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 13:59:49 +0100 Subject: [PATCH 64/85] Update tests.yml --- .github/workflows/tests.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index dbcecd2a..1dae69a1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -23,6 +23,11 @@ jobs: terraform_version: 0.14.10 - name: build-linter-utests run: | + echo "########## LINTER ##########" + cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) + rustup component add clippy + cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) + export PATH=$GITHUB_WORKSPACE/bin:$PATH export 
RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache export SCCACHE_REDIS=${{ secrets.SCCACHE_REDIS }} @@ -34,10 +39,6 @@ jobs: echo "########## SHARED CACHE STATUS ##########" sccache --version sccache --show-stats - echo "########## LINTER ##########" - cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) - rustup component add clippy - cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) echo "########## START BUILD ##########" cargo build --all-features sccache --show-stats From 6597b0546a47858cc7188ad7784d7df5af1b71a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 14:10:54 +0100 Subject: [PATCH 65/85] Update tests.yml --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1dae69a1..4734a60b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -26,7 +26,7 @@ jobs: echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) rustup component add clippy - cargo clippy --all --all-features --lib || (echo "Solve your clippy warnings to continue"; exit 1) + cargo clippy --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache From 745f3d75c3f0d0f5eab63c1cab54547fbdc32243 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 14:34:08 +0100 Subject: [PATCH 66/85] Bump test --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4734a60b..7eb52ed9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -26,7 +26,7 @@ jobs: echo "########## LINTER ##########" cargo fmt 
--all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) rustup component add clippy - cargo clippy --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) + cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache From 2b7005c48979488575e362ef0616a8c8a68eca63 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 14:43:37 +0100 Subject: [PATCH 67/85] fmt --- rustfmt.toml | 2 + src/build_platform/local_docker.rs | 39 +-- src/build_platform/mod.rs | 7 +- src/cloud_provider/aws/application.rs | 12 +- src/cloud_provider/aws/databases/mongodb.rs | 23 +- src/cloud_provider/aws/databases/mysql.rs | 23 +- .../aws/databases/postgresql.rs | 23 +- src/cloud_provider/aws/databases/redis.rs | 23 +- .../aws/kubernetes/helm_charts.rs | 20 +- src/cloud_provider/aws/kubernetes/mod.rs | 191 ++++----------- src/cloud_provider/aws/kubernetes/roles.rs | 5 +- src/cloud_provider/aws/mod.rs | 11 +- src/cloud_provider/aws/regions.rs | 5 +- .../digitalocean/application.rs | 17 +- .../digitalocean/databases/mongodb.rs | 23 +- .../digitalocean/databases/mysql.rs | 23 +- .../digitalocean/databases/postgresql.rs | 23 +- .../digitalocean/databases/redis.rs | 23 +- .../digitalocean/do_api_common.rs | 6 +- .../digitalocean/kubernetes/cidr.rs | 5 +- .../digitalocean/kubernetes/doks_api.rs | 10 +- .../digitalocean/kubernetes/helm_charts.rs | 15 +- .../digitalocean/kubernetes/mod.rs | 172 ++++---------- src/cloud_provider/digitalocean/mod.rs | 4 +- .../digitalocean/network/vpc.rs | 26 +- src/cloud_provider/digitalocean/router.rs | 5 +- src/cloud_provider/helm.rs | 35 +-- src/cloud_provider/kubernetes.rs | 86 ++----- src/cloud_provider/qovery.rs | 15 +- src/cloud_provider/scaleway/application.rs | 17 +- .../scaleway/databases/mongodb.rs | 30 +-- 
.../scaleway/databases/mysql.rs | 23 +- .../scaleway/databases/postgresql.rs | 23 +- .../scaleway/databases/redis.rs | 30 +-- .../scaleway/kubernetes/helm_charts.rs | 15 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 178 ++++---------- src/cloud_provider/service.rs | 57 ++--- src/cloud_provider/utilities.rs | 71 ++---- src/cmd/command.rs | 29 +-- src/cmd/docker.rs | 41 +--- src/cmd/helm.rs | 22 +- src/cmd/kubectl.rs | 75 ++---- src/cmd/structs.rs | 10 +- src/cmd/terraform.rs | 15 +- src/container_registry/docr.rs | 7 +- src/container_registry/ecr.rs | 7 +- src/dns_provider/cloudflare.rs | 4 +- src/error.rs | 15 +- src/errors/mod.rs | 223 ++++-------------- src/events/mod.rs | 76 ++---- src/fs.rs | 18 +- src/git.rs | 48 +--- src/logger.rs | 35 +-- src/models.rs | 27 +-- src/object_storage/s3.rs | 6 +- src/template.rs | 7 +- src/transaction.rs | 40 +--- test_utilities/src/aws.rs | 9 +- test_utilities/src/common.rs | 25 +- test_utilities/src/digitalocean.rs | 17 +- test_utilities/src/scaleway.rs | 19 +- test_utilities/src/utilities.rs | 70 +----- tests/aws/aws_databases.rs | 5 +- tests/aws/aws_environment.rs | 48 +--- tests/aws/aws_kubernetes.rs | 5 +- tests/aws/aws_s3.rs | 28 +-- tests/digitalocean/do_databases.rs | 22 +- tests/digitalocean/do_environment.rs | 57 +---- tests/digitalocean/do_spaces.rs | 16 +- tests/scaleway/scw_container_registry.rs | 15 +- tests/scaleway/scw_databases.rs | 22 +- tests/scaleway/scw_environment.rs | 73 +----- tests/scaleway/scw_object_storage.rs | 7 +- 73 files changed, 524 insertions(+), 1935 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 9328e6c1..62a48d97 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,4 @@ max_width = 120 +fn_call_width = 100 +#attr_fn_like_width = 80 edition = "2018" diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 4a1abdb5..4c6d00b2 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -131,11 +131,7 @@ impl 
LocalDocker { // Going to inject only env var that are used by the dockerfile // so extracting it and modifying the image tag and env variables let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { - BuildError::IoError( - build.image.application_id.clone(), - "reading dockerfile content".to_string(), - err, - ) + BuildError::IoError(build.image.application_id.clone(), "reading dockerfile content".to_string(), err) })?; let dockerfile_args = match extract_dockerfile_args(dockerfile_content) { Ok(dockerfile_args) => dockerfile_args, @@ -167,10 +163,7 @@ impl LocalDocker { let image_name = image_to_build.image_name(); log_info(format!("🕵️ Checking if image already exist remotely {}", image_name)); if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { - log_info(format!( - "🎯 Skipping build. Image already exist in the registry {}", - image_name - )); + log_info(format!("🎯 Skipping build. Image already exist in the registry {}", image_name)); // skip build return Ok(()); @@ -331,11 +324,7 @@ impl LocalDocker { format!("build/{}", build.image.name.as_str()), ) .map_err(|err| { - BuildError::IoError( - build.image.application_id.clone(), - "when creating build workspace".to_string(), - err, - ) + BuildError::IoError(build.image.application_id.clone(), "when creating build workspace".to_string(), err) }) } } @@ -369,20 +358,15 @@ impl BuildPlatform for LocalDocker { // LOGGING let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); - let msg = format!( - "📥 Cloning repository: {} to {:?}", - build.git_repository.url, repository_root_path - ); + let msg = format!("📥 Cloning repository: {} to {:?}", build.git_repository.url, repository_root_path); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: app_id.clone() }, ProgressLevel::Info, Some(msg.clone()), self.context.execution_id(), )); - self.logger.log( - LogLevel::Info, - 
EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), - ); + self.logger + .log(LogLevel::Info, EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))); // LOGGING // Create callback that will be called by git to provide credentials per user @@ -398,10 +382,7 @@ impl BuildPlatform for LocalDocker { } if let Some(Credentials { login, password }) = &build.git_repository.credentials { - creds.push(( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(login, password).unwrap(), - )); + creds.push((CredentialType::USER_PASS_PLAINTEXT, Cred::userpass_plaintext(login, password).unwrap())); } creds @@ -467,10 +448,8 @@ impl BuildPlatform for LocalDocker { // If the dockerfile does not exist, abort if !dockerfile_absolute_path.is_file() { - let msg = format!( - "Specified dockerfile path {:?} does not exist within the repository", - &dockerfile_path - ); + let msg = + format!("Specified dockerfile path {:?} does not exist within the repository", &dockerfile_path); return Err(BuildError::InvalidConfig(app_id, msg)); } diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index be1ce938..eeedb4a1 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -138,12 +138,7 @@ impl Image { &self.repository_name } pub fn full_image_name_with_tag(&self) -> String { - format!( - "{}/{}:{}", - self.registry_url.host_str().unwrap_or_default(), - self.name, - self.tag - ) + format!("{}/{}:{}", self.registry_url.host_str().unwrap_or_default(), self.name, self.tag) } pub fn full_image_name(&self) -> String { diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs index aa37c36e..49f3230f 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/cloud_provider/aws/application.rs @@ -270,10 +270,7 @@ impl Service for ApplicationAws { context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( 
- "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -341,12 +338,7 @@ impl Pause for ApplicationAws { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { Statefulset } else { Deployment }, - ) + scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) }) } diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 4fc2b6d4..230bc3d8 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -204,10 +204,7 @@ impl Service for MongoDbAws { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -229,10 +226,7 @@ impl Service for MongoDbAws { context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -251,11 +245,7 @@ impl Database for MongoDbAws {} impl ToTransmitter for MongoDbAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), 
- self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -311,12 +301,7 @@ impl Create for MongoDbAws { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs index 2fdf6bd6..d645aa6d 100644 --- a/src/cloud_provider/aws/databases/mysql.rs +++ b/src/cloud_provider/aws/databases/mysql.rs @@ -105,11 +105,7 @@ impl StatefulService for MySQLAws { impl ToTransmitter for MySQLAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -223,10 +219,7 @@ impl Service for MySQLAws { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -248,10 +241,7 @@ impl Service for MySQLAws { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - 
context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -320,12 +310,7 @@ impl Create for MySQLAws { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs index dc4d8d65..07ec3678 100644 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ b/src/cloud_provider/aws/databases/postgresql.rs @@ -105,11 +105,7 @@ impl StatefulService for PostgreSQLAws { impl ToTransmitter for PostgreSQLAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -209,10 +205,7 @@ impl Service for PostgreSQLAws { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_name", self.sanitized_name().as_str()); context.insert("database_db_name", self.name()); @@ -236,10 +229,7 @@ impl Service for PostgreSQLAws { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - 
"resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -308,12 +298,7 @@ impl Create for PostgreSQLAws { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs index 3f71f08b..9c90501b 100644 --- a/src/cloud_provider/aws/databases/redis.rs +++ b/src/cloud_provider/aws/databases/redis.rs @@ -102,11 +102,7 @@ impl StatefulService for RedisAws { impl ToTransmitter for RedisAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -229,10 +225,7 @@ impl Service for RedisAws { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -252,10 +245,7 @@ impl Service for RedisAws { context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); context.insert("delete_automated_backups", &self.context().is_test_cluster()); if 
self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -324,12 +314,7 @@ impl Create for RedisAws { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 9024cc79..6e9f7b71 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -67,10 +67,7 @@ pub fn aws_helm_charts( Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. 
Are you running it in dry run mode?"; - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), - )); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -79,14 +76,8 @@ pub fn aws_helm_charts( let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); } }; @@ -1357,10 +1348,7 @@ impl AwsVpcCniChart { "Error while getting daemonset info for chart {}, won't deploy CNI chart.", &self.chart_info.name ); - Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )) + Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))) } } } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 0a2d90ff..11676b2f 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -345,10 +345,7 @@ impl EKS { } VpcQoveryNetworkMode::WithoutNatGateways => {} }; - context.insert( - "vpc_qovery_network_mode", - &self.options.vpc_qovery_network_mode.to_string(), - ); + context.insert("vpc_qovery_network_mode", &self.options.vpc_qovery_network_mode.to_string()); let rds_zone_a_subnet_blocks = format_ips(&self.options.rds_zone_a_subnet_blocks); let rds_zone_b_subnet_blocks = format_ips(&self.options.rds_zone_b_subnet_blocks); @@ -383,33 +380,18 @@ impl EKS { 
context.insert("organization_id", self.cloud_provider.organization_id()); context.insert("qovery_api_url", &qovery_api_url); - context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); context.insert("test_cluster", &self.context.is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } context.insert("force_upgrade", &self.context.requires_forced_upgrade()); // Qovery features - context.insert( - "log_history_enabled", - &self.context.is_feature_enabled(&Features::LogsHistory), - ); - context.insert( - "metrics_history_enabled", - &self.context.is_feature_enabled(&Features::MetricsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); + context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); // DNS configuration let managed_dns_list = vec![self.dns_provider.name()]; @@ -422,22 +404,10 @@ impl EKS { context.insert("managed_dns", &managed_dns_list); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - &managed_dns_domains_terraform_format, - ); - context.insert( - "managed_dns_domains_root_terraform_format", - &managed_dns_domains_root_terraform_format, - ); - context.insert( - 
"managed_dns_resolvers_terraform_format", - &managed_dns_resolvers_terraform_format, - ); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); + context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); + context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { @@ -508,10 +478,8 @@ impl EKS { .secret_access_key .as_str(), ); - context.insert( - "aws_region_tfstates_account", - self.cloud_provider().terraform_state_credentials().region.as_str(), - ); + context + .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); context.insert("aws_region", &self.region()); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); @@ -555,18 +523,9 @@ impl EKS { // AWS - Elasticsearch context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); - context.insert( - "elasticsearch_zone_a_subnet_blocks", - &elasticsearch_zone_a_subnet_blocks, - ); - context.insert( - "elasticsearch_zone_b_subnet_blocks", - &elasticsearch_zone_b_subnet_blocks, - ); - context.insert( - "elasticsearch_zone_c_subnet_blocks", - &elasticsearch_zone_c_subnet_blocks, - ); + context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); + context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); + context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); // grafana credentials context.insert("grafana_admin_user", self.options.grafana_admin_user.as_str()); @@ -738,19 +697,13 @@ impl EKS { } Err(e) => self.logger().log( LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - 
None, - ), + EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), ), }; // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } // kubernetes helm deployments on the cluster @@ -831,10 +784,9 @@ impl EKS { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), - ), + Ok(ok_line) => self + .logger() + .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None))), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -1032,16 +984,11 @@ impl EKS { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message))); Ok(()) } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )), + Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), } } @@ -1125,16 +1072,11 @@ impl EKS { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = format!( - "Ensuring everything is up to date before deleting cluster {}/{}", - self.name(), - self.id() - ); + let message = + format!("Ensuring everything is up to date before deleting cluster {}/{}", 
self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1219,10 +1161,8 @@ impl EKS { } } Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); + let message_safe = + format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); self.logger().log( LogLevel::Error, EngineEvent::Deleting( @@ -1245,11 +1185,9 @@ impl EKS { ); // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubernetes_config_file_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = + Helm::new(&kubernetes_config_file_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; @@ -1387,10 +1325,8 @@ impl EKS { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1420,10 +1356,9 @@ impl EKS { ); Ok(()) } - Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - error, - )), + Err(Operation { error, .. 
}) => { + Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) + } Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1538,12 +1473,7 @@ impl Kubernetes for EKS { let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!( - "Start preparing EKS upgrade process {} cluster with id {}", - self.name(), - self.id() - ) - .as_str(), + format!("Start preparing EKS upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); self.logger().log( @@ -1565,12 +1495,7 @@ impl Kubernetes for EKS { match &kubernetes_upgrade_status.required_upgrade_on { Some(KubernetesNodesType::Masters) => { self.send_to_customer( - format!( - "Start upgrading process for master nodes on {}/{}", - self.name(), - self.id() - ) - .as_str(), + format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), &listeners_helper, ); self.logger().log( @@ -1635,11 +1560,7 @@ impl Kubernetes for EKS { match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => { self.send_to_customer( - format!( - "Kubernetes {} master nodes have been successfully upgraded", - self.name() - ) - .as_str(), + format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); self.logger().log( @@ -1653,10 +1574,7 @@ impl Kubernetes for EKS { ); } Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } } @@ -1700,11 +1618,7 @@ impl Kubernetes for EKS { // Upgrade worker nodes // self.send_to_customer( - format!( - "Preparing workers nodes for upgrade for Kubernetes cluster {}", - self.name() - ) - .as_str(), + format!("Preparing workers nodes 
for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); self.logger().log( @@ -1717,10 +1631,7 @@ impl Kubernetes for EKS { // disable cluster autoscaler to avoid interfering with AWS upgrade procedure context.insert("enable_cluster_autoscaler", &false); - context.insert( - "eks_workers_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); + context.insert("eks_workers_version", format!("{}", &kubernetes_upgrade_status.requested_version).as_str()); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1750,10 +1661,7 @@ impl Kubernetes for EKS { )); } - self.send_to_customer( - format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), - &listeners_helper, - ); + self.send_to_customer(format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), &listeners_helper); self.logger().log( LogLevel::Info, EngineEvent::Deploying( @@ -1768,11 +1676,7 @@ impl Kubernetes for EKS { match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => { self.send_to_customer( - format!( - "Kubernetes {} workers nodes have been successfully upgraded", - self.name() - ) - .as_str(), + format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); self.logger().log( @@ -1789,10 +1693,7 @@ impl Kubernetes for EKS { // enable cluster autoscaler deployment let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } diff --git a/src/cloud_provider/aws/kubernetes/roles.rs b/src/cloud_provider/aws/kubernetes/roles.rs index e4593425..99439ae3 100644 --- a/src/cloud_provider/aws/kubernetes/roles.rs +++ b/src/cloud_provider/aws/kubernetes/roles.rs @@ -79,10 +79,7 
@@ impl Role { Ok(_) => Ok(true), Err(e) => { let safe_message = format!("Unable to know if `{}` exist on AWS Account", &self.role_name); - return Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message), - )); + return Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message))); } }; } diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index d40d83ae..3323de5e 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -58,12 +58,7 @@ impl AWS { } pub fn credentials(&self) -> StaticProvider { - StaticProvider::new( - self.access_key_id.to_string(), - self.secret_access_key.to_string(), - None, - None, - ) + StaticProvider::new(self.access_key_id.to_string(), self.secret_access_key.to_string(), None, None) } pub fn client(&self) -> Client { @@ -115,9 +110,7 @@ impl CloudProvider for AWS { match s { Ok(_x) => Ok(()), - Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )), + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials(event_details)), } } diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index 3ede17b0..8a942c26 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -429,10 +429,7 @@ mod tests { let current_zone = AwsZones::from_string(sanitized_zone.to_lowercase()); assert_eq!(current_zone.unwrap(), zone); } - assert_eq!( - AwsZones::from_string("eu-west-3x".to_string()), - Err(RegionAndZoneErrors::ZoneNotSupported) - ); + assert_eq!(AwsZones::from_string("eu-west-3x".to_string()), Err(RegionAndZoneErrors::ZoneNotSupported)); } #[test] diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs index 2e96c929..c6d2784b 100644 --- a/src/cloud_provider/digitalocean/application.rs +++ b/src/cloud_provider/digitalocean/application.rs @@ -273,10 +273,7 @@ impl Service for 
ApplicationDo { context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -345,12 +342,7 @@ impl Pause for ApplicationDo { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { Statefulset } else { Deployment }, - ) + scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) }) } @@ -505,10 +497,7 @@ impl FromStr for DoRegion { "tor1" => Ok(DoRegion::Toronto), "blr1" => Ok(DoRegion::Bangalore), _ => { - return Err(CommandError::new_from_safe_message(format!( - "`{}` region is not supported", - s - ))); + return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); } } } diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index 9a257482..a526f2bf 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -66,12 +66,7 @@ impl MongoDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mongodb_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for MongoDo { impl ToTransmitter for MongoDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + 
Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -196,10 +187,7 @@ impl Service for MongoDo { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -217,10 +205,7 @@ impl Service for MongoDo { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index cd92f716..f49b78a0 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -66,12 +66,7 @@ impl MySQLDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mysql_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_mysql_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for MySQLDo { impl ToTransmitter for MySQLDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), 
self.service_type().to_string(), self.name().to_string()) } } @@ -196,10 +187,7 @@ impl Service for MySQLDo { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -217,10 +205,7 @@ impl Service for MySQLDo { context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index b993f2ef..44b0309f 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -66,12 +66,7 @@ impl PostgresDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_postgres_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_postgres_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for PostgresDo { impl ToTransmitter for PostgresDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), 
self.service_type().to_string(), self.name().to_string()) } } @@ -196,10 +187,7 @@ impl Service for PostgresDo { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name()); context.insert("database_login", self.options.login.as_str()); @@ -219,10 +207,7 @@ impl Service for PostgresDo { context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 709c0e69..3437f4a5 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -66,12 +66,7 @@ impl RedisDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_redis_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_redis_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for RedisDo { impl ToTransmitter for RedisDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ 
-196,10 +187,7 @@ impl Service for RedisDo { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -216,10 +204,7 @@ impl Service for RedisDo { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) diff --git a/src/cloud_provider/digitalocean/do_api_common.rs b/src/cloud_provider/digitalocean/do_api_common.rs index 69471800..296f9202 100644 --- a/src/cloud_provider/digitalocean/do_api_common.rs +++ b/src/cloud_provider/digitalocean/do_api_common.rs @@ -37,10 +37,8 @@ pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Res match response.status() { StatusCode::OK => Ok(response.text().expect("Cannot get response text")), StatusCode::UNAUTHORIZED => { - let message_safe = format!( - "Could not get {} information, ensure your DigitalOcean token is valid.", - api_type - ); + let message_safe = + format!("Could not get {} information, ensure your DigitalOcean token is valid.", api_type); return Err(CommandError::new( format!("{}, response: {:?}", message_safe, response), Some(message_safe), diff --git a/src/cloud_provider/digitalocean/kubernetes/cidr.rs b/src/cloud_provider/digitalocean/kubernetes/cidr.rs index 8184403b..ff2ac779 100644 --- a/src/cloud_provider/digitalocean/kubernetes/cidr.rs +++ 
b/src/cloud_provider/digitalocean/kubernetes/cidr.rs @@ -19,10 +19,7 @@ pub fn get_used_cidr_on_region(token: &str) { let mut cmd = QoveryCommand::new("doctl", &["vpcs", "list", "--output", "json", "-t", token], &[]); let _ = cmd.exec_with_output(&mut |r_out| output_from_cli.push_str(&r_out), &mut |r_err| { - error!( - "DOCTL CLI error from cmd inserted, please check vpcs list command{}", - r_err - ) + error!("DOCTL CLI error from cmd inserted, please check vpcs list command{}", r_err) }); let buff = output_from_cli.borrow(); diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 3f393de8..c87b3f1e 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -26,10 +26,7 @@ pub fn get_doks_info_from_name( } Err(e) => { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; - return Err(CommandError::new( - format!("{}, error: {}", safe_message, e), - Some(safe_message.to_string()), - )); + return Err(CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()))); } } } @@ -50,10 +47,7 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result Ok(options.options.versions), Err(e) => { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; - return Err(CommandError::new( - format!("{}, error: {}", safe_message, e), - Some(safe_message.to_string()), - )); + return Err(CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()))); } } } diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 44d789e3..14024738 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -121,10 +121,7 @@ pub fn do_helm_charts( 
Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), - )); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -133,14 +130,8 @@ pub fn do_helm_charts( let qovery_terraform_config: DigitalOceanQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); } }; diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 7682ba7f..73b93782 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -246,22 +246,10 @@ impl DOKS { context.insert("do_loadbalancer_hostname", &self.do_loadbalancer_hostname()); context.insert("managed_dns_domain", self.dns_provider.domain().to_string().as_str()); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - &managed_dns_domains_terraform_format, - ); - context.insert( - "managed_dns_domains_root_terraform_format", - &managed_dns_domains_root_terraform_format, - ); - context.insert( - 
"managed_dns_resolvers_terraform_format", - &managed_dns_resolvers_terraform_format, - ); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); + context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); + context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); @@ -276,10 +264,7 @@ impl DOKS { context.insert("test_cluster", &self.context.is_test_cluster()); context.insert("doks_cluster_id", &self.id()); context.insert("doks_master_name", &self.name()); - context.insert( - "doks_version", - self.get_supported_doks_version(event_details.clone())?.as_str(), - ); + context.insert("doks_version", self.get_supported_doks_version(event_details.clone())?.as_str()); context.insert("do_space_kubeconfig_filename", &self.kubeconfig_file_name()); // Network @@ -291,15 +276,9 @@ impl DOKS { context.insert("object_storage_kubeconfig_bucket", &self.kubeconfig_bucket_name()); context.insert("object_storage_logs_bucket", &self.logs_bucket_name()); - context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); context.insert("test_cluster", &self.context.is_test_cluster()); context.insert("qovery_api_url", self.options.qovery_api_url.as_str()); @@ -310,19 +289,10 @@ impl DOKS { context.insert("discord_api_key", self.options.discord_api_key.as_str()); // Qovery features - 
context.insert( - "log_history_enabled", - &self.context.is_feature_enabled(&Features::LogsHistory), - ); - context.insert( - "metrics_history_enabled", - &self.context.is_feature_enabled(&Features::MetricsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); + context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } // grafana credentials @@ -350,10 +320,8 @@ impl DOKS { .as_str(), ); - context.insert( - "aws_region_tfstates_account", - self.cloud_provider().terraform_state_credentials().region.as_str(), - ); + context + .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); context.insert("nginx_enable_horizontal_autoscaler", "true"); context.insert("nginx_minimum_replicas", "2"); @@ -441,11 +409,7 @@ impl DOKS { } fn do_loadbalancer_hostname(&self) -> String { - format!( - "qovery-nginx-{}.{}", - self.cloud_provider.id(), - self.dns_provider().domain() - ) + format!("qovery-nginx-{}.{}", self.cloud_provider.id(), self.dns_provider().domain()) } fn lets_encrypt_url(&self) -> String { @@ -463,9 +427,7 @@ impl DOKS { // TODO(benjaminch): `qovery-` to be added into Rust name directly everywhere match get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id())) { Ok(cluster_result) => match cluster_result { - None => Err(CommandError::new_from_safe_message( - "Cluster doesn't exist on DO side.".to_string(), - )), + None => Err(CommandError::new_from_safe_message("Cluster doesn't exist on DO side.".to_string())), Some(cluster) => Ok(cluster), }, Err(e) => Err(e), @@ -481,11 +443,7 @@ impl DOKS { execution_id: 
self.context.execution_id().to_string(), }, ProgressLevel::Info, - Some(format!( - "start to create Digital Ocean Kubernetes cluster {} with id {}", - self.name(), - self.id() - )), + Some(format!("start to create Digital Ocean Kubernetes cluster {} with id {}", self.name(), self.id())), self.context.execution_id(), )); self.logger().log( @@ -575,12 +533,7 @@ impl DOKS { ), ); self.send_to_customer( - format!( - "Deploying DOKS {} cluster deployment with id {}", - self.name(), - self.id() - ) - .as_str(), + format!("Deploying DOKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); @@ -613,10 +566,7 @@ impl DOKS { } Err(e) => self.logger().log( LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - ), + EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), ), }; @@ -631,10 +581,7 @@ impl DOKS { // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } let kubeconfig_path = &self.get_kubeconfig_file_path()?; @@ -783,11 +730,8 @@ impl DOKS { ..Default::default() }; - let helm = Helm::new( - &kubeconfig_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; // This will ony print the diff on stdout let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &[]); @@ -809,10 +753,9 @@ impl DOKS { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { 
- Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), - ), + Ok(ok_line) => self + .logger() + .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None))), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -923,16 +866,11 @@ impl DOKS { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = format!( - "Ensuring everything is up to date before deleting cluster {}/{}", - self.name(), - self.id() - ); + let message = + format!("Ensuring everything is up to date before deleting cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1020,10 +958,8 @@ impl DOKS { } } Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); + let message_safe = + format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); self.logger().log( LogLevel::Error, EngineEvent::Deleting( @@ -1046,11 +982,8 @@ impl DOKS { ); // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubeconfig_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); helm.uninstall(&chart, &[]) .map_err(|e| 
to_engine_error(&event_details, e))?; @@ -1188,10 +1121,8 @@ impl DOKS { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1221,10 +1152,9 @@ impl DOKS { ); Ok(()) } - Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - error, - )), + Err(Operation { error, .. }) => { + Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) + } Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1385,12 +1315,7 @@ impl Kubernetes for DOKS { }) .expect("Unable to create directory"); - let file_path = format!( - "{}/qovery-kubeconfigs-{}/{}.yaml", - workspace_directory, - self.id(), - self.id() - ); + let file_path = format!("{}/qovery-kubeconfigs-{}/{}.yaml", workspace_directory, self.id(), self.id()); let path = Path::new(file_path.as_str()); let parent_dir = path.parent().unwrap(); let _ = block_on(tokio::fs::create_dir_all(parent_dir)); @@ -1466,12 +1391,7 @@ impl Kubernetes for DOKS { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!( - "Start preparing DOKS upgrade process {} cluster with id {}", - self.name(), - self.id() - ) - .as_str(), + format!("Start preparing DOKS upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); self.logger().log( @@ -1502,11 +1422,7 @@ impl Kubernetes for DOKS { // Upgrade worker nodes // 
self.send_to_customer( - format!( - "Preparing workers nodes for upgrade for Kubernetes cluster {}", - self.name() - ) - .as_str(), + format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); self.logger().log( @@ -1566,10 +1482,7 @@ impl Kubernetes for DOKS { )); } - self.send_to_customer( - format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), - &listeners_helper, - ); + self.send_to_customer(format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper); self.logger().log( LogLevel::Info, EngineEvent::Deploying( @@ -1604,10 +1517,7 @@ impl Kubernetes for DOKS { } }, Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index 1fa1ae7f..50ca7dc2 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -105,9 +105,7 @@ impl CloudProvider for DO { let client = DigitalOcean::new(&self.token); match client { Ok(_x) => Ok(()), - Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )), + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials(event_details)), } } diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 7994e5b7..3ac4931e 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -128,10 +128,7 @@ fn do_get_vpcs_from_api_output(json_content: &str) -> Result, CommandEr Ok(vpcs) => Ok(vpcs.vpcs), Err(e) => { let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API"; - Err(CommandError::new( - format!("{}, error: {}", message_safe, e), - Some(message_safe.to_string()), - )) + 
Err(CommandError::new(format!("{}, error: {}", message_safe, e), Some(message_safe.to_string()))) } } } @@ -258,10 +255,7 @@ mod tests_do_vpcs { let vpc_subnets: Vec = vpcs.into_iter().map(|x| x.ip_range).collect(); let joined_subnets = vpc_subnets.join(","); - assert_eq!( - joined_subnets, - "10.2.0.0/16,10.110.0.0/20,10.116.0.0/20,10.1.0.0/16,10.0.0.0/16" - ); + assert_eq!(joined_subnets, "10.2.0.0/16,10.110.0.0/20,10.116.0.0/20,10.1.0.0/16,10.0.0.0/16"); } #[test] @@ -270,11 +264,9 @@ mod tests_do_vpcs { let vpcs = do_get_vpcs_from_api_output(&json_content).unwrap(); // available - assert!( - get_do_vpc_from_subnet("10.3.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) - .unwrap() - .is_none() - ); + assert!(get_do_vpc_from_subnet("10.3.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) + .unwrap() + .is_none()); // already used assert_eq!( get_do_vpc_from_subnet("10.2.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) @@ -286,11 +278,9 @@ mod tests_do_vpcs { // DO reserved subnet in the same region assert!(get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt).is_err()); // DO reserved subnet in another region - assert!( - get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs, DoRegion::London) - .unwrap() - .is_none() - ); + assert!(get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs, DoRegion::London) + .unwrap() + .is_none()); } #[test] diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index 91db5f5b..83d769fc 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -288,10 +288,7 @@ impl Helm for RouterDo { } fn helm_chart_values_dir(&self) -> String { - format!( - "{}/digitalocean/chart_values/nginx-ingress", - self.context.lib_root_dir() - ) + format!("{}/digitalocean/chart_values/nginx-ingress", self.context.lib_root_dir()) } fn helm_chart_external_name_service_dir(&self) -> String { diff --git 
a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index d8a397d1..41ddd4dd 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -155,14 +155,9 @@ pub trait HelmChart: Send { let chart = self.get_chart_info(); for file in chart.values_files.iter() { if let Err(e) = fs::metadata(file) { - let safe_message = format!( - "Can't access helm chart override file `{}` for chart `{}`", - file, chart.name, - ); - return Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message), - )); + let safe_message = + format!("Can't access helm chart override file `{}` for chart `{}`", file, chart.name,); + return Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message))); } } Ok(None) @@ -229,10 +224,7 @@ pub trait HelmChart: Send { match chart_info.action { HelmAction::Deploy => { if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { - warn!( - "error while trying to destroy chart if breaking change is detected: {:?}", - e.to_string() - ); + warn!("error while trying to destroy chart if breaking change is detected: {:?}", e.to_string()); } helm.upgrade(chart_info, &[]).map_err(to_command_error)?; @@ -307,10 +299,8 @@ fn deploy_parallel_charts( } Err(e) => { let safe_message = "Thread panicked during parallel charts deployments."; - let error = Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message.to_string()), - )); + let error = + Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message.to_string()))); errors.push(error); } } @@ -597,10 +587,7 @@ impl HelmChart for PrometheusOperatorConfigChart { match chart_info.action { HelmAction::Deploy => { if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { - warn!( - "error while trying to destroy chart if breaking change is detected: {}", - e.to_string() - ); + warn!("error while trying to destroy chart if breaking change is detected: {}", 
e.to_string()); } helm.upgrade(chart_info, &[]).map_err(to_command_error)?; @@ -666,12 +653,8 @@ pub fn get_chart_for_shell_agent( context: ShellAgentContext, chart_path: impl Fn(&str) -> String, ) -> Result { - let shell_agent_version: QoveryShellAgent = get_qovery_app_version( - QoveryAppName::ShellAgent, - context.api_token, - context.api_url, - context.cluster_id, - )?; + let shell_agent_version: QoveryShellAgent = + get_qovery_app_version(QoveryAppName::ShellAgent, context.api_token, context.api_url, context.cluster_id)?; let shell_agent = CommonChart { chart_info: ChartInfo { name: "shell-agent".to_string(), diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 9cffdb6b..f8068df8 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -795,11 +795,7 @@ where EngineEvent::Deleting( event_details.clone(), EventMessage::new( - format!( - "Encountering issues while trying to get objects kind {}: {:?}", - object, - e.message() - ), + format!("Encountering issues while trying to get objects kind {}: {:?}", object, e.message()), None, ), ), @@ -808,9 +804,8 @@ where } // delete if resource exists - match retry::retry( - Fibonacci::from_millis(5000).take(3), - || match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { + match retry::retry(Fibonacci::from_millis(5000).take(3), || { + match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { Ok(_) => OperationResult::Ok(()), Err(e) => { logger.log( @@ -822,8 +817,8 @@ where ); OperationResult::Retry(e) } - }, - ) { + } + }) { Ok(_) => {} Err(Operation { error, .. 
}) => { return Err(EngineError::new_cannot_uninstall_helm_chart( @@ -866,10 +861,7 @@ where let masters_version = match VersionsNumber::from_str(raw_version.as_str()) { Ok(vn) => vn, Err(_) => { - return Err(EngineError::new_cannot_determine_k8s_master_version( - event_details, - raw_version.to_string(), - )) + return Err(EngineError::new_cannot_determine_k8s_master_version(event_details, raw_version.to_string())) } }; @@ -904,13 +896,7 @@ where } } - check_kubernetes_upgrade_status( - requested_version, - masters_version, - workers_version, - event_details, - logger, - ) + check_kubernetes_upgrade_status(requested_version, masters_version, workers_version, event_details, logger) } pub fn is_kubernetes_upgradable

( @@ -936,10 +922,7 @@ where Ok(()) } }, - Err(err) => Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget( - event_details, - err, - )), + Err(err) => Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget(event_details, err)), } } @@ -1074,14 +1057,12 @@ fn check_kubernetes_upgrade_status( } } Err(e) => { - return Err( - EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details, - deployed_masters_version, - wished_version, - e, - ), - ) + return Err(EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( + event_details, + deployed_masters_version, + wished_version, + e, + )) } }; @@ -1127,14 +1108,12 @@ fn check_kubernetes_upgrade_status( non_up_to_date_workers += 1; } Err(e) => { - return Err( - EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details, - node, - wished_version, - e, - ), - ) + return Err(EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( + event_details, + node, + wished_version, + e, + )) } } } @@ -1274,18 +1253,9 @@ where F: Fn() -> R, { let waiting_message = match action { - Action::Create => Some(format!( - "Infrastructure '{}' deployment is in progress...", - kubernetes.name_with_id() - )), - Action::Pause => Some(format!( - "Infrastructure '{}' pause is in progress...", - kubernetes.name_with_id() - )), - Action::Delete => Some(format!( - "Infrastructure '{}' deletion is in progress...", - kubernetes.name_with_id() - )), + Action::Create => Some(format!("Infrastructure '{}' deployment is in progress...", kubernetes.name_with_id())), + Action::Pause => Some(format!("Infrastructure '{}' pause is in progress...", kubernetes.name_with_id())), + Action::Delete => Some(format!("Infrastructure '{}' deletion is in progress...", kubernetes.name_with_id())), Action::Nothing => None, }; @@ -1423,10 +1393,7 @@ pub fn validate_k8s_required_cpu_and_burstable( context_id, )); - logger.log( - 
LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(message)), - ); + logger.log(LogLevel::Warning, EngineEvent::Warning(event_details, EventMessage::new_from_safe(message))); set_cpu_burst = total_cpu.clone(); } @@ -1447,10 +1414,7 @@ pub fn convert_k8s_cpu_value_to_f32(value: String) -> Result } Err(e) => Err(CommandError::new( e.to_string(), - Some(format!( - "Error while trying to parse `{}` to float 32.", - value_number_string.as_str() - )), + Some(format!("Error while trying to parse `{}` to float 32.", value_number_string.as_str())), )), }; } diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index d9a27611..314b3d2f 100644 --- a/src/cloud_provider/qovery.rs +++ b/src/cloud_provider/qovery.rs @@ -52,24 +52,15 @@ pub fn get_qovery_app_version( QoveryAppName::ShellAgent => "shellAgent", }; - let url = format!( - "https://{}/api/v1/{}-version?type=cluster&clusterId={}", - api_fqdn, app_type, cluster_id - ); + let url = format!("https://{}/api/v1/{}-version?type=cluster&clusterId={}", api_fqdn, app_type, cluster_id); let message_safe = format!("Error while trying to get `{}` version.", app_type); match reqwest::blocking::Client::new().get(&url).headers(headers).send() { Ok(x) => match x.json::() { Ok(qa) => Ok(qa), - Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )), + Err(e) => Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))), }, - Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )), + Err(e) => Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))), } } diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs index 4e088c7a..52a87c0f 100644 --- a/src/cloud_provider/scaleway/application.rs +++ b/src/cloud_provider/scaleway/application.rs @@ -275,10 +275,7 @@ impl Service for 
ApplicationScw { context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } // container registry credentials @@ -358,12 +355,7 @@ impl Pause for ApplicationScw { ); send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { Statefulset } else { Deployment }, - ) + scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) }) } @@ -553,10 +545,7 @@ impl FromStr for ScwZone { "nl-ams-1" => Ok(ScwZone::Amsterdam1), "pl-waw-1" => Ok(ScwZone::Warsaw1), _ => { - return Err(CommandError::new_from_safe_message(format!( - "`{}` zone is not supported", - s - ))); + return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); } } } diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index fbdb3670..0e552ce4 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -66,12 +66,7 @@ impl MongoDbScw { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mongodb_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for MongoDbScw { impl ToTransmitter for MongoDbScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + 
Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -198,10 +189,7 @@ impl Service for MongoDbScw { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -219,10 +207,7 @@ impl Service for MongoDbScw { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -291,12 +276,7 @@ impl Create for MongoDbScw { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index 1400b456..f6ce64a1 100644 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -122,11 +122,7 @@ impl StatefulService for MySQLScw { impl ToTransmitter for MySQLScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + 
Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -224,10 +220,7 @@ impl Service for MySQLScw { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -250,10 +243,7 @@ impl Service for MySQLScw { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("skip_final_snapshot", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -322,12 +312,7 @@ impl Create for MySQLScw { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index bd6fb29c..d64cbf27 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -131,11 +131,7 @@ impl StatefulService for PostgresScw { impl ToTransmitter for PostgresScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - 
self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -233,10 +229,7 @@ impl Service for PostgresScw { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_name", self.sanitized_name().as_str()); context.insert("database_db_name", self.name()); @@ -259,10 +252,7 @@ impl Service for PostgresScw { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("skip_final_snapshot", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -331,12 +321,7 @@ impl Create for PostgresScw { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 4cdd09dd..287b5162 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -66,12 +66,7 @@ impl RedisScw { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - 
check_service_version( - get_self_hosted_redis_version(self.version()), - self, - event_details, - self.logger(), - ) + check_service_version(get_self_hosted_redis_version(self.version()), self, event_details, self.logger()) } fn cloud_provider_name(&self) -> &str { @@ -95,11 +90,7 @@ impl StatefulService for RedisScw { impl ToTransmitter for RedisScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } @@ -197,10 +188,7 @@ impl Service for RedisScw { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -217,10 +205,7 @@ impl Service for RedisScw { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -289,12 +274,7 @@ impl Create for RedisScw { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] diff --git 
a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 7e99be2e..68cf8da6 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -116,10 +116,7 @@ pub fn scw_helm_charts( Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), - )); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -128,14 +125,8 @@ pub fn scw_helm_charts( let qovery_terraform_config: ScalewayQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); - return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); + return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); } }; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 274ca894..a4679af5 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -271,9 +271,9 @@ impl Kapsule { let error_cluster_id = "expected cluster id for this Scaleway cluster".to_string(); let cluster_id = match cluster_info.id { None => { - return Err(ScwNodeGroupErrors::NodeGroupValidationError( - CommandError::new_from_safe_message(error_cluster_id), - )) + return 
Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new_from_safe_message( + error_cluster_id, + ))) } Some(x) => x, }; @@ -292,10 +292,7 @@ impl Kapsule { Err(e) => { let msg = format!("error while trying to get SCW pool info from cluster {}", &cluster_id); let msg_with_error = format!("{}. {:?}", msg, e); - return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( - msg_with_error, - Some(msg), - ))); + return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new(msg_with_error, Some(msg)))); } }; @@ -306,24 +303,15 @@ impl Kapsule { &cluster_id, &cluster_info.name.unwrap_or_else(|| "unknown cluster".to_string()) ); - return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( - msg.clone(), - Some(msg), - ))); + return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new(msg.clone(), Some(msg)))); } // create sanitized nodegroup pools let mut nodegroup_pool: Vec = Vec::with_capacity(pools.total_count.unwrap_or(0 as f32) as usize); for ng in pools.pools.unwrap() { if ng.id.is_none() { - let msg = format!( - "error while trying to validate SCW pool ID from cluster {}", - &cluster_id - ); - return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new( - msg.clone(), - Some(msg), - ))); + let msg = format!("error while trying to validate SCW pool ID from cluster {}", &cluster_id); + return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new(msg.clone(), Some(msg)))); } let ng_sanitized = self.get_node_group_info(ng.id.unwrap().as_str())?; nodegroup_pool.push(ng_sanitized) @@ -342,10 +330,8 @@ impl Kapsule { Err(e) => { return Err(match e { Error::ResponseError(x) => { - let msg_with_error = format!( - "Error code while getting node group: {}, API message: {} ", - x.status, x.content - ); + let msg_with_error = + format!("Error code while getting node group: {}, API message: {} ", x.status, x.content); match x.status { StatusCode::NOT_FOUND => 
ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( msg_with_error, @@ -451,22 +437,10 @@ impl Kapsule { context.insert("managed_dns", &managed_dns_list); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - &managed_dns_domains_terraform_format, - ); - context.insert( - "managed_dns_domains_root_terraform_format", - &managed_dns_domains_root_terraform_format, - ); - context.insert( - "managed_dns_resolvers_terraform_format", - &managed_dns_resolvers_terraform_format, - ); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); + context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); + context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); @@ -492,29 +466,14 @@ impl Kapsule { context.insert("qovery_nats_url", self.options.qovery_nats_url.as_str()); context.insert("qovery_nats_user", self.options.qovery_nats_user.as_str()); context.insert("qovery_nats_password", self.options.qovery_nats_password.as_str()); - context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); // Qovery features - context.insert( - "log_history_enabled", - 
&self.context.is_feature_enabled(&Features::LogsHistory), - ); - context.insert( - "metrics_history_enabled", - &self.context.is_feature_enabled(&Features::MetricsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); + context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } // AWS S3 tfstates storage tfstates @@ -532,10 +491,8 @@ impl Kapsule { .secret_access_key .as_str(), ); - context.insert( - "aws_region_tfstates_account", - self.cloud_provider().terraform_state_credentials().region.as_str(), - ); + context + .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); @@ -734,10 +691,7 @@ impl Kapsule { } Err(e) => self.logger().log( LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - ), + EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), ), }; @@ -775,10 +729,7 @@ impl Kapsule { // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } // push config file to object storage @@ -817,10 +768,7 @@ impl Kapsule { Err(e) => { match e { 
ScwNodeGroupErrors::CloudProviderApiError(c) => { - return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details, - Some(c), - )) + return Err(EngineError::new_missing_api_info_from_cloud_provider_error(event_details, Some(c))) } ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log( LogLevel::Info, @@ -1080,10 +1028,9 @@ impl Kapsule { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line)), - ), + Ok(ok_line) => self + .logger() + .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line))), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -1282,16 +1229,11 @@ impl Kapsule { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message))); Ok(()) } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )), + Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), } } @@ -1358,16 +1300,11 @@ impl Kapsule { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = format!( - "Ensuring everything is up to date before deleting cluster {}/{}", - self.name(), - self.id() - ); + let message = + format!("Ensuring everything is up to date before deleting cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - 
EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1455,10 +1392,8 @@ impl Kapsule { } } Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); + let message_safe = + format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); self.logger().log( LogLevel::Error, EngineEvent::Deleting( @@ -1481,11 +1416,8 @@ impl Kapsule { ); // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubeconfig_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; @@ -1623,10 +1555,8 @@ impl Kapsule { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); self.logger().log( LogLevel::Info, @@ -1656,10 +1586,9 @@ impl Kapsule { ); Ok(()) } - Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - error, - )), + Err(Operation { error, .. 
}) => { + Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) + } Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1773,12 +1702,7 @@ impl Kubernetes for Kapsule { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!( - "Start preparing Kapsule upgrade process {} cluster with id {}", - self.name(), - self.id() - ) - .as_str(), + format!("Start preparing Kapsule upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); self.logger().log( @@ -1820,10 +1744,8 @@ impl Kubernetes for Kapsule { ), ); - context.insert( - "kubernetes_cluster_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); + context + .insert("kubernetes_cluster_version", format!("{}", &kubernetes_upgrade_status.requested_version).as_str()); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1851,10 +1773,7 @@ impl Kubernetes for Kapsule { )); } - self.send_to_customer( - format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), - &listeners_helper, - ); + self.send_to_customer(format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper); self.logger().log( LogLevel::Info, EngineEvent::Deploying( @@ -1889,10 +1808,7 @@ impl Kubernetes for Kapsule { } }, Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details, - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 56b531f6..3d975b89 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -78,11 +78,7 @@ pub trait Service: 
ToTransmitter { true => fqdn.to_string(), false => match is_managed { true => format!("{}-dns.{}.svc.cluster.local", self.id(), target.environment.namespace()), - false => format!( - "{}.{}.svc.cluster.local", - self.sanitized_name(), - target.environment.namespace() - ), + false => format!("{}.{}.svc.cluster.local", self.sanitized_name(), target.environment.namespace()), }, } } @@ -422,11 +418,9 @@ where })?; // do exec helm upgrade and return the last deployment status - let helm = helm::Helm::new( - &kubernetes_config_file_path, - &kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| helm::to_engine_error(&event_details, e))?; + let helm = + helm::Helm::new(&kubernetes_config_file_path, &kubernetes.cloud_provider().credentials_environment_variables()) + .map_err(|e| helm::to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_custom_namespace( helm_release_name, workspace_dir.clone(), @@ -932,11 +926,7 @@ where )) } Err(_err) => { - let message = format!( - "{} version {} is not supported!", - service.service_type().name(), - service.version(), - ); + let message = format!("{} version {} is not supported!", service.service_type().name(), service.version(),); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -998,12 +988,7 @@ pub fn check_kubernetes_service_error( where T: Service + ?Sized, { - let message = format!( - "{} {} {}", - action_verb, - service.service_type().name().to_lowercase(), - service.name() - ); + let message = format!("{} {} {}", action_verb, service.service_type().name().to_lowercase(), service.name()); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -1022,10 +1007,8 @@ where } CheckAction::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), - ); + logger + .log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), 
EventMessage::new_from_safe(message))); } CheckAction::Delete => { listeners_helper.delete_in_progress(progress_info); @@ -1259,16 +1242,12 @@ where service.service_type().name(), service.name_with_id() )), - Action::Pause => Some(format!( - "{} '{}' pause is in progress...", - service.service_type().name(), - service.name_with_id() - )), - Action::Delete => Some(format!( - "{} '{}' deletion is in progress...", - service.service_type().name(), - service.name_with_id() - )), + Action::Pause => { + Some(format!("{} '{}' pause is in progress...", service.service_type().name(), service.name_with_id())) + } + Action::Delete => { + Some(format!("{} '{}' deletion is in progress...", service.service_type().name(), service.name_with_id())) + } Action::Nothing => None, }; @@ -1291,12 +1270,8 @@ where let logger = service.logger().clone_dyn(); let listeners = std::clone::Clone::clone(service.listeners()); - let progress_info = ProgressInfo::new( - service.progress_scope(), - Info, - waiting_message.clone(), - service.context().execution_id(), - ); + let progress_info = + ProgressInfo::new(service.progress_scope(), Info, waiting_message.clone(), service.context().execution_id()); let (tx, rx) = mpsc::channel(); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index cc0bf073..a1ee5989 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -183,10 +183,8 @@ pub fn generate_supported_version( } else { for minor in minor_min..minor_max + 1 { // add short minor format targeting latest version - supported_versions.insert( - format!("{}.{}", major, minor), - format!("{}.{}.{}", major, minor, update_max.unwrap()), - ); + supported_versions + .insert(format!("{}.{}", major, minor), format!("{}.{}.{}", major, minor, update_max.unwrap())); if update_min.unwrap() == update_max.unwrap() { let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); supported_versions.insert(version.clone(), format!("{}{}", version, 
suffix)); @@ -240,11 +238,7 @@ impl VersionsNumber { } pub fn to_major_minor_version_string(&self, default_minor: &str) -> String { - let test = format!( - "{}.{}", - self.major.clone(), - self.minor.as_ref().unwrap_or(&default_minor.to_string()) - ); + let test = format!("{}.{}", self.major.clone(), self.minor.as_ref().unwrap_or(&default_minor.to_string())); test } @@ -255,9 +249,7 @@ impl FromStr for VersionsNumber { fn from_str(version: &str) -> Result { if version.trim() == "" { - return Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )); + return Err(CommandError::new_from_safe_message("version cannot be empty".to_string())); } let mut version_split = version.splitn(4, '.').map(|v| v.trim()); @@ -382,11 +374,7 @@ pub fn check_cname_for( }; send_deployment_progress( - format!( - "Checking CNAME resolution of '{}'. Please wait, it can take some time...", - cname_to_check - ) - .as_str(), + format!("Checking CNAME resolution of '{}'. Please wait, it can take some time...", cname_to_check).as_str(), ); // Trying for 5 min to resolve CNAME @@ -397,17 +385,12 @@ pub fn check_cname_for( resolver }; let fixed_iterable = Fixed::from_millis(Duration::seconds(5).num_milliseconds() as u64).take(6 * 5); - let check_result = retry::retry(fixed_iterable, || { - match get_cname_record_value(next_resolver(), cname_to_check) { - Some(domain) => OperationResult::Ok(domain), - None => { - let msg = format!( - "Cannot find domain under CNAME {}. Retrying in 5 seconds...", - cname_to_check - ); - send_deployment_progress(msg.as_str()); - OperationResult::Retry(msg) - } + let check_result = retry::retry(fixed_iterable, || match get_cname_record_value(next_resolver(), cname_to_check) { + Some(domain) => OperationResult::Ok(domain), + None => { + let msg = format!("Cannot find domain under CNAME {}. 
Retrying in 5 seconds...", cname_to_check); + send_deployment_progress(msg.as_str()); + OperationResult::Retry(msg) } }); @@ -439,10 +422,7 @@ pub fn check_domain_for( let resolvers = dns_resolvers(); for domain in domains_to_check { - let message = format!( - "Let's check domain resolution for '{}'. Please wait, it can take some time...", - domain - ); + let message = format!("Let's check domain resolution for '{}'. Please wait, it can take some time...", domain); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -555,19 +535,10 @@ pub fn print_action( event_details: EventDetails, logger: &dyn Logger, ) { - let msg = format!( - "{}.{}.{} called for {}", - cloud_provider_name, struct_name, fn_name, item_name - ); + let msg = format!("{}.{}.{} called for {}", cloud_provider_name, struct_name, fn_name, item_name); match fn_name.contains("error") { - true => logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg)), - ), - false => logger.log( - LogLevel::Info, - EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), - ), + true => logger.log(LogLevel::Warning, EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg))), + false => logger.log(LogLevel::Info, EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))), } } @@ -597,23 +568,17 @@ mod tests { let test_cases = vec![ TestCase { input: "", - expected_output: Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "empty version str", }, TestCase { input: " ", - expected_output: Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "version a tab str", }, TestCase { input: " ", - expected_output: 
Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "version as a space str", }, TestCase { diff --git a/src/cmd/command.rs b/src/cmd/command.rs index a49da2ec..5167d215 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -116,11 +116,7 @@ impl QoveryCommand { } pub fn exec(&mut self) -> Result<(), CommandError> { - self.exec_with_abort( - &mut |line| info!("{}", line), - &mut |line| warn!("{}", line), - &CommandKiller::never(), - ) + self.exec_with_abort(&mut |line| info!("{}", line), &mut |line| warn!("{}", line), &CommandKiller::never()) } pub fn exec_with_output( @@ -277,10 +273,7 @@ impl QoveryCommand { } if !exit_status.success() { - debug!( - "command: {:?} terminated with error exist status {:?}", - self.command, exit_status - ); + debug!("command: {:?} terminated with error exist status {:?}", self.command, exit_status); return Err(ExitStatusError(exit_status)); } @@ -352,29 +345,17 @@ mod tests { #[test] fn test_command_with_timeout() { let mut cmd = QoveryCommand::new("sleep", &["120"], &[]); - let ret = cmd.exec_with_abort( - &mut |_| {}, - &mut |_| {}, - &CommandKiller::from_timeout(Duration::from_secs(2)), - ); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sh", &["-c", "cat /dev/urandom | grep -a --null-data ."], &[]); - let ret = cmd.exec_with_abort( - &mut |_| {}, - &mut |_| {}, - &CommandKiller::from_timeout(Duration::from_secs(2)), - ); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); let mut cmd = QoveryCommand::new("sleep", &["1"], &[]); - let ret = cmd.exec_with_abort( - &mut |_| {}, - &mut |_| {}, - 
&CommandKiller::from_timeout(Duration::from_secs(2)), - ); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); assert!(ret.is_ok()); } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index e55517bf..d523ac77 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -81,13 +81,8 @@ impl Docker { // First check that the buildx plugin is correctly installed let args = vec!["buildx", "version"]; - let buildx_cmd_exist = docker_exec( - &args, - &docker.get_all_envs(&[]), - &mut |_| {}, - &mut |_| {}, - &CommandKiller::never(), - ); + let buildx_cmd_exist = + docker_exec(&args, &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); if buildx_cmd_exist.is_err() { return Err(DockerError::InvalidConfig( "Docker buildx plugin for buildkit is not correctly installed".to_string(), @@ -106,13 +101,7 @@ impl Docker { "--bootstrap", "--use", ]; - let _ = docker_exec( - &args, - &docker.get_all_envs(&[]), - &mut |_| {}, - &mut |_| {}, - &CommandKiller::never(), - ); + let _ = docker_exec(&args, &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); Ok(docker) } @@ -199,13 +188,7 @@ impl Docker { { info!("Docker pull {:?}", image); - docker_exec( - &["pull", &image.image_name()], - &self.get_all_envs(&[]), - stdout_output, - stderr_output, - should_abort, - ) + docker_exec(&["pull", &image.image_name()], &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort) } pub fn build( @@ -407,13 +390,7 @@ impl Docker { let mut args = vec!["push"]; args.extend(image_names.iter().map(|x| x.as_str())); - docker_exec( - &args, - &self.get_all_envs(&[]), - stdout_output, - stderr_output, - should_abort, - ) + docker_exec(&args, &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort) } pub fn prune_images(&self) -> Result<(), DockerError> { @@ -429,13 +406,7 @@ impl Docker { let mut errored_commands = vec![]; for prune in all_prunes_commands { - let ret = 
docker_exec( - &prune, - &self.get_all_envs(&[]), - &mut |_| {}, - &mut |_| {}, - &CommandKiller::never(), - ); + let ret = docker_exec(&prune, &self.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); if let Err(e) = ret { errored_commands.push(e); } diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index a02bcc7e..54b6024a 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -129,12 +129,9 @@ impl Helm { let mut stdout = String::new(); let mut stderr = String::new(); - match helm_exec_with_output( - &args, - &self.get_all_envs(envs), - &mut |line| stdout.push_str(&line), - &mut |line| stderr.push_str(&line), - ) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |line| stdout.push_str(&line), &mut |line| { + stderr.push_str(&line) + }) { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { stderr.push_str(&err.message()); @@ -172,9 +169,7 @@ impl Helm { ]; let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| { - stderr.push_str(&line) - }) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -207,9 +202,7 @@ impl Helm { ]; let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| { - stderr.push_str(&line) - }) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -288,10 +281,7 @@ impl Helm { Err(HelmError::CmdError( "none".to_string(), LIST, - CommandError::new( - format!("{}, error: {}", message_safe, e), - Some(message_safe.to_string()), - ), + CommandError::new(format!("{}, error: {}", message_safe, e), 
Some(message_safe.to_string())), )) } } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 4fafb7e4..f49b4616 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -105,12 +105,9 @@ where let mut output_vec: Vec = Vec::with_capacity(20); let mut err_output_vec: Vec = Vec::with_capacity(20); let cmd_args = vec!["get", "svc", "-n", namespace, service_name, "-o", "json"]; - let _ = kubectl_exec_with_output( - cmd_args.clone(), - envs.clone(), - &mut |line| output_vec.push(line), - &mut |line| err_output_vec.push(line), - )?; + let _ = kubectl_exec_with_output(cmd_args.clone(), envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + err_output_vec.push(line) + })?; let output_string: String = output_vec.join("\n"); let err_output_string: String = output_vec.join("\n"); @@ -165,9 +162,7 @@ where return Ok(None); } - Ok(Some( - result.metadata.annotations.kubernetes_digitalocean_com_load_balancer_id, - )) + Ok(Some(result.metadata.annotations.kubernetes_digitalocean_com_load_balancer_id)) } Err(e) => Err(e), } @@ -192,9 +187,7 @@ where return Ok(None); } - Ok(Some( - result.status.load_balancer.ingress.first().unwrap().hostname.clone(), - )) + Ok(Some(result.status.load_balancer.ingress.first().unwrap().hostname.clone())) } pub fn kubectl_exec_is_pod_ready_with_retry

( @@ -419,9 +412,7 @@ where P: AsRef, { if labels.is_empty() { - return Err(CommandError::new_from_safe_message( - "No labels were defined, can't set them".to_string(), - )); + return Err(CommandError::new_from_safe_message("No labels were defined, can't set them".to_string())); }; if !kubectl_exec_is_namespace_present(kubernetes_config.as_ref(), namespace, envs.clone()) { @@ -445,9 +436,8 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); _envs.extend(envs.clone()); - let _ = kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| { - error!("{}", line) - })?; + let _ = + kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| error!("{}", line))?; Ok(()) } @@ -555,12 +545,10 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); _envs.extend(envs); - let _ = kubectl_exec_with_output( - vec!["delete", "crd", crd_name], - _envs, - &mut |line| info!("{}", line), - &mut |line| error!("{}", line), - )?; + let _ = + kubectl_exec_with_output(vec!["delete", "crd", crd_name], _envs, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + })?; Ok(()) } @@ -680,12 +668,7 @@ where environment_variables.push(("KUBECONFIG", kubernetes_config.as_ref().to_str().unwrap())); let args = vec!["-n", namespace, "rollout", "restart", "deployment", name]; - kubectl_exec_with_output( - args, - environment_variables, - &mut |line| info!("{}", line), - &mut |line| error!("{}", line), - ) + kubectl_exec_with_output(args, environment_variables, &mut |line| info!("{}", line), &mut |line| error!("{}", line)) } pub fn kubectl_exec_get_node

( @@ -784,11 +767,7 @@ pub fn kubectl_exec_get_configmap

( where P: AsRef, { - kubectl_exec::( - vec!["get", "configmap", "-o", "json", "-n", namespace, name], - kubernetes_config, - envs, - ) + kubectl_exec::(vec!["get", "configmap", "-o", "json", "-n", namespace, name], kubernetes_config, envs) } pub fn kubectl_exec_get_json_events

( @@ -878,10 +857,7 @@ where P: AsRef, { let pods = specific_pod_name.unwrap_or("*"); - let api_url = format!( - "/apis/custom.metrics.k8s.io/v1beta1/namespaces/{}/pods/{}/{}", - namespace, pods, metric_name - ); + let api_url = format!("/apis/custom.metrics.k8s.io/v1beta1/namespaces/{}/pods/{}/{}", namespace, pods, metric_name); kubectl_exec::(vec!["get", "--raw", api_url.as_str()], kubernetes_config, envs) } @@ -1035,22 +1011,14 @@ pub fn kubectl_get_pvc

(kubernetes_config: P, namespace: &str, envs: Vec<(&str where P: AsRef, { - kubectl_exec::( - vec!["get", "pvc", "-o", "json", "-n", namespace], - kubernetes_config, - envs, - ) + kubectl_exec::(vec!["get", "pvc", "-o", "json", "-n", namespace], kubernetes_config, envs) } pub fn kubectl_get_svc

(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result where P: AsRef, { - kubectl_exec::( - vec!["get", "svc", "-o", "json", "-n", namespace], - kubernetes_config, - envs, - ) + kubectl_exec::(vec!["get", "svc", "-o", "json", "-n", namespace], kubernetes_config, envs) } /// kubectl_delete_crash_looping_pods: delete crash looping pods. @@ -1190,12 +1158,9 @@ where _envs.extend(envs); let mut output_vec: Vec = Vec::with_capacity(50); - let _ = kubectl_exec_with_output( - args.clone(), - _envs.clone(), - &mut |line| output_vec.push(line), - &mut |line| error!("{}", line), - )?; + let _ = kubectl_exec_with_output(args.clone(), _envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + error!("{}", line) + })?; let output_string: String = output_vec.join(""); diff --git a/src/cmd/structs.rs b/src/cmd/structs.rs index ea39b03f..54c7ff67 100644 --- a/src/cmd/structs.rs +++ b/src/cmd/structs.rs @@ -1311,10 +1311,7 @@ mod tests { assert_eq!(pod_status.is_ok(), true); let pod_status = pod_status.unwrap(); assert_eq!(pod_status.items[0].status.conditions[0].status, "False"); - assert_eq!( - pod_status.items[0].status.conditions[0].reason, - KubernetesPodStatusReason::CrashLoopBackOff - ); + assert_eq!(pod_status.items[0].status.conditions[0].reason, KubernetesPodStatusReason::CrashLoopBackOff); let payload = r#"{ "apiVersion": "v1", @@ -1587,10 +1584,7 @@ mod tests { let pod_status = serde_json::from_str::>(payload); assert!(pod_status.is_ok()); - assert_eq!( - pod_status.unwrap().items[0].status.conditions[0].reason, - KubernetesPodStatusReason::Unknown(None) - ); + assert_eq!(pod_status.unwrap().items[0].status.conditions[0].reason, KubernetesPodStatusReason::Unknown(None)); } #[test] diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index 2c0a018c..c9af2b30 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -27,10 +27,7 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re Ok(_) => Ok(()), Err(e) 
=> Err(CommandError::new( format!("Wasn't able to delete terraform lock file {}", &terraform_provider_lock), - Some(format!( - "Wasn't able to delete terraform lock file {}, error: {:?}", - &terraform_provider_lock, e - )), + Some(format!("Wasn't able to delete terraform lock file {}, error: {:?}", &terraform_provider_lock, e)), )), }; } else if err.message().contains("Plugin reinitialization required") { @@ -38,9 +35,7 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re return Ok(()); } - Err(CommandError::new_from_safe_message( - "Not known method to fix this Terraform issue".to_string(), - )) + Err(CommandError::new_from_safe_message("Not known method to fix this Terraform issue".to_string())) } fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> { @@ -192,11 +187,7 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co let mut stdout = Vec::new(); let mut stderr = Vec::new(); - let mut cmd = QoveryCommand::new( - "terraform", - &args, - &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())], - ); + let mut cmd = QoveryCommand::new("terraform", &args, &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())]); cmd.set_current_dir(root_dir); let result = cmd.exec_with_output( diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index e369495a..4eb68f5e 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -148,11 +148,8 @@ impl DOCR { } pub fn exec_docr_login(&self) -> Result<(), ContainerRegistryError> { - let mut cmd = QoveryCommand::new( - "doctl", - &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], - &[], - ); + let mut cmd = + QoveryCommand::new("doctl", &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], &[]); match cmd.exec() { Ok(_) => Ok(()), diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 6e6f3bd5..0db19cea 100644 --- a/src/container_registry/ecr.rs 
+++ b/src/container_registry/ecr.rs @@ -77,12 +77,7 @@ impl ECR { } pub fn credentials(&self) -> StaticProvider { - StaticProvider::new( - self.access_key_id.to_string(), - self.secret_access_key.to_string(), - None, - None, - ) + StaticProvider::new(self.access_key_id.to_string(), self.secret_access_key.to_string(), None, None) } pub fn client(&self) -> Client { diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 134c7b7a..5e257c11 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -73,9 +73,7 @@ impl DnsProvider for Cloudflare { fn is_valid(&self) -> Result<(), EngineError> { if self.cloudflare_api_token.is_empty() || self.cloudflare_email.is_empty() { - Err(EngineError::new_client_invalid_cloud_provider_credentials( - self.get_event_details(), - )) + Err(EngineError::new_client_invalid_cloud_provider_credentials(self.get_event_details())) } else { Ok(()) } diff --git a/src/error.rs b/src/error.rs index 81afaf4d..00fd95a5 100644 --- a/src/error.rs +++ b/src/error.rs @@ -110,20 +110,13 @@ pub fn cast_simple_error_to_engine_error>( match input { Err(simple_error) => { let message = match simple_error.kind { - SimpleErrorKind::Command(exit_status) => format!( - "{} ({})", - simple_error.message.unwrap_or_else(|| "".into()), - exit_status - ), + SimpleErrorKind::Command(exit_status) => { + format!("{} ({})", simple_error.message.unwrap_or_else(|| "".into()), exit_status) + } SimpleErrorKind::Other => simple_error.message.unwrap_or_else(|| "".into()), }; - Err(EngineError::new( - EngineErrorCause::Internal, - scope, - execution_id, - Some(message), - )) + Err(EngineError::new(EngineErrorCause::Internal, scope, execution_id, Some(message))) } Ok(x) => Ok(x), } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 02ba96a3..2871e4e8 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -469,15 +469,7 @@ impl EngineError { link: Option, hint_message: Option, ) -> EngineError { - EngineError::new( - 
event_details, - Tag::Unknown, - qovery_log_message, - user_log_message, - message, - link, - hint_message, - ) + EngineError::new(event_details, Tag::Unknown, qovery_log_message, user_log_message, message, link, hint_message) } /// Creates new error for missing required env variable. @@ -490,15 +482,7 @@ impl EngineError { /// * `variable_name`: Variable name which is not set. pub fn new_missing_required_env_variable(event_details: EventDetails, variable_name: String) -> EngineError { let message = format!("`{}` environment variable wasn't found.", variable_name); - EngineError::new( - event_details, - Tag::MissingRequiredEnvVariable, - message.to_string(), - message, - None, - None, - None, - ) + EngineError::new(event_details, Tag::MissingRequiredEnvVariable, message.to_string(), message, None, None, None) } /// Creates new error for cluster has no worker nodes. @@ -732,10 +716,7 @@ impl EngineError { let mut message = vec!["There is not enough resources on the cluster:".to_string()]; if requested_cpu > free_cpu { - message.push(format!( - "{} CPU requested and only {} CPU available", - free_cpu, requested_cpu - )); + message.push(format!("{} CPU requested and only {} CPU available", free_cpu, requested_cpu)); } if requested_ram_in_mib > free_ram_in_mib { @@ -849,10 +830,7 @@ impl EngineError { event_details: EventDetails, kubernetes_raw_version: String, ) -> EngineError { - let message = format!( - "Unable to determine Kubernetes master version: `{}`", - kubernetes_raw_version, - ); + let message = format!("Unable to determine Kubernetes master version: `{}`", kubernetes_raw_version,); EngineError::new( event_details, @@ -903,10 +881,7 @@ impl EngineError { event_details: EventDetails, kubelet_worker_raw_version: String, ) -> EngineError { - let message = format!( - "Unable to determine Kubelet worker version: `{}`", - kubelet_worker_raw_version, - ); + let message = format!("Unable to determine Kubelet worker version: `{}`", kubelet_worker_raw_version,); 
EngineError::new( event_details, @@ -972,10 +947,7 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `pod_name`: Pod name having PDB in an invalid state. pub fn new_k8s_pod_disruption_budget_invalid_state(event_details: EventDetails, pod_name: String) -> EngineError { - let message = format!( - "Unable to upgrade Kubernetes, pdb for app `{}` in invalid state.", - pod_name, - ); + let message = format!("Unable to upgrade Kubernetes, pdb for app `{}` in invalid state.", pod_name,); EngineError::new( event_details, @@ -1203,15 +1175,7 @@ impl EngineError { selector, namespace ); - EngineError::new( - event_details, - Tag::K8sGetLogs, - message.to_string(), - message, - Some(raw_error), - None, - None, - ) + EngineError::new(event_details, Tag::K8sGetLogs, message.to_string(), message, Some(raw_error), None, None) } /// Creates new error for kubernetes get events. @@ -1228,15 +1192,7 @@ impl EngineError { ) -> EngineError { let message = format!("Error, unable to retrieve events in namespace `{}`.", namespace); - EngineError::new( - event_details, - Tag::K8sGetLogs, - message.to_string(), - message, - Some(raw_error), - None, - None, - ) + EngineError::new(event_details, Tag::K8sGetLogs, message.to_string(), message, Some(raw_error), None, None) } /// Creates new error for kubernetes describe. @@ -1253,20 +1209,10 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, unable to describe pod with selector `{}` in namespace `{}`.", - selector, namespace - ); + let message = + format!("Error, unable to describe pod with selector `{}` in namespace `{}`.", selector, namespace); - EngineError::new( - event_details, - Tag::K8sDescribe, - message.to_string(), - message, - Some(raw_error), - None, - None, - ) + EngineError::new(event_details, Tag::K8sDescribe, message.to_string(), message, Some(raw_error), None, None) } /// Creates new error for kubernetes history. 
@@ -1279,15 +1225,7 @@ impl EngineError { pub fn new_k8s_history(event_details: EventDetails, namespace: String, raw_error: CommandError) -> EngineError { let message = format!("Error, unable to get history in namespace `{}`.", namespace); - EngineError::new( - event_details, - Tag::K8sHistory, - message.to_string(), - message, - Some(raw_error), - None, - None, - ) + EngineError::new(event_details, Tag::K8sHistory, message.to_string(), message, Some(raw_error), None, None) } /// Creates new error for kubernetes namespace creation issue. @@ -1329,10 +1267,7 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, pod with selector `{}` in namespace `{}` is not ready.", - selector, namespace - ); + let message = format!("Error, pod with selector `{}` in namespace `{}` is not ready.", selector, namespace); EngineError::new( event_details, @@ -1357,10 +1292,7 @@ impl EngineError { requested_version: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, node is not ready with the requested version `{}`.", - requested_version - ); + let message = format!("Error, node is not ready with the requested version `{}`.", requested_version); EngineError::new( event_details, @@ -1432,15 +1364,7 @@ impl EngineError { pub fn new_missing_required_binary(event_details: EventDetails, missing_binary_name: String) -> EngineError { let message = format!("`{}` binary is required but was not found.", missing_binary_name); - EngineError::new( - event_details, - Tag::CannotFindRequiredBinary, - message.to_string(), - message, - None, - None, - None, - ) + EngineError::new(event_details, Tag::CannotFindRequiredBinary, message.to_string(), message, None, None, None) } /// Creates new error for subnets count not being even. Subnets count should be even to get the same number as private and public. 
@@ -1460,15 +1384,7 @@ impl EngineError { zone_name, subnets_count, ); - EngineError::new( - event_details, - Tag::SubnetsCountShouldBeEven, - message.to_string(), - message, - None, - None, - None, - ) + EngineError::new(event_details, Tag::SubnetsCountShouldBeEven, message.to_string(), message, None, None, None) } /// Creates new error for IAM role which cannot be retrieved or created. @@ -1510,10 +1426,7 @@ impl EngineError { to_dir: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to copy all files from `{}` to `{}`.", - from_dir, to_dir - ); + let message = format!("Error while trying to copy all files from `{}` to `{}`.", from_dir, to_dir); EngineError::new( event_details, @@ -1655,10 +1568,8 @@ impl EngineError { parameter_value: String, raw_error: Option, ) -> EngineError { - let message = format!( - "{} value `{}` not supported for parameter `{}`", - service_type, parameter_value, parameter_name, - ); + let message = + format!("{} value `{}` not supported for parameter `{}`", service_type, parameter_value, parameter_name,); EngineError::new( event_details, @@ -1756,15 +1667,7 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `error`: Raw error message. 
pub fn new_build_error(event_details: EventDetails, error: BuildError) -> EngineError { - EngineError::new( - event_details, - Tag::BuilderError, - error.to_string(), - error.to_string(), - None, - None, - None, - ) + EngineError::new(event_details, Tag::BuilderError, error.to_string(), error.to_string(), None, None, None) } /// Creates new error from an Container Registry error @@ -1829,10 +1732,8 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to get helm chart `{}` history in namespace `{}`.", - helm_chart, namespace - ); + let message = + format!("Error while trying to get helm chart `{}` history in namespace `{}`.", helm_chart, namespace); EngineError::new( event_details, @@ -1904,15 +1805,7 @@ impl EngineError { ) -> EngineError { let message = format!("Error, version `{}` is not supported for `{}`.", version, product_name); - EngineError::new( - event_details, - Tag::UnsupportedVersion, - message.to_string(), - message, - None, - None, - None, - ) + EngineError::new(event_details, Tag::UnsupportedVersion, message.to_string(), message, None, None, None) } /// Creates new error while trying to get cluster. @@ -1975,10 +1868,7 @@ impl EngineError { service_name: String, ) -> EngineError { // TODO(benjaminch): Service should probably passed otherwise, either inside event_details or via a new dedicated struct. 
- let message = format!( - "Service `{}` (id `{}`) failed to deploy (before start).", - service_name, service_id - ); + let message = format!("Service `{}` (id `{}`) failed to deploy (before start).", service_name, service_id); EngineError::new( event_details, @@ -2005,10 +1895,8 @@ impl EngineError { service_type: String, raw_error: Option, ) -> EngineError { - let message = format!( - "Database `{}` (id `{}`) failed to start after several retries.", - service_type, service_id - ); + let message = + format!("Database `{}` (id `{}`) failed to start after several retries.", service_type, service_id); EngineError::new( event_details, @@ -2071,10 +1959,7 @@ impl EngineError { raw_version_number: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to parse `{}` to a version number.", - raw_version_number - ); + let message = format!("Error while trying to parse `{}` to a version number.", raw_version_number); EngineError::new( event_details, @@ -2199,10 +2084,7 @@ impl EngineError { event_details: EventDetails, requested_language: String, ) -> EngineError { - let message = format!( - "Cannot build: Invalid buildpacks language format: `{}`.", - requested_language - ); + let message = format!("Cannot build: Invalid buildpacks language format: `{}`.", requested_language); EngineError::new( event_details, @@ -2321,15 +2203,7 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `error`: Raw error message. pub fn new_docker_error(event_details: EventDetails, error: DockerError) -> EngineError { - EngineError::new( - event_details, - Tag::DockerError, - error.to_string(), - error.to_string(), - None, - None, - None, - ) + EngineError::new(event_details, Tag::DockerError, error.to_string(), error.to_string(), None, None, None) } /// Creates new error when trying to push a Docker image. 
@@ -2346,10 +2220,8 @@ impl EngineError { repository_url: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, trying to push Docker image `{}` to repository `{}`.", - image_name, repository_url - ); + let message = + format!("Error, trying to push Docker image `{}` to repository `{}`.", image_name, repository_url); EngineError::new( event_details, @@ -2376,10 +2248,8 @@ impl EngineError { repository_url: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, trying to pull Docker image `{}` from repository `{}`.", - image_name, repository_url - ); + let message = + format!("Error, trying to pull Docker image `{}` from repository `{}`.", image_name, repository_url); EngineError::new( event_details, @@ -2481,10 +2351,7 @@ impl EngineError { registry_name: String, raw_error: ContainerRegistryError, ) -> EngineError { - let message = format!( - "Error, trying to create registry `{}` in `{}`.", - registry_name, repository_name - ); + let message = format!("Error, trying to create registry `{}` in `{}`.", registry_name, repository_name); EngineError::new( event_details, @@ -2509,10 +2376,7 @@ impl EngineError { repository_name: String, raw_error: ContainerRegistryError, ) -> EngineError { - let message = format!( - "Error, trying to set lifecycle policy repository `{}`.", - repository_name, - ); + let message = format!("Error, trying to set lifecycle policy repository `{}`.", repository_name,); EngineError::new( event_details, @@ -2535,10 +2399,8 @@ impl EngineError { event_details: EventDetails, repository_name: String, ) -> EngineError { - let message = format!( - "Failed to retrieve credentials and endpoint URL from container registry `{}`.", - repository_name, - ); + let message = + format!("Failed to retrieve credentials and endpoint URL from container registry `{}`.", repository_name,); EngineError::new( event_details, @@ -2755,10 +2617,7 @@ impl EngineError { file_name: String, raw_error: 
ObjectStorageError, ) -> EngineError { - let message = format!( - "Error, cannot put file `{}` into object storage bucket `{}`.", - file_name, bucket_name, - ); + let message = format!("Error, cannot put file `{}` into object storage bucket `{}`.", file_name, bucket_name,); EngineError::new( event_details, @@ -2833,10 +2692,8 @@ impl EngineError { bucket_name: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to activate versioning for object storage bucket `{}`.", - bucket_name, - ); + let message = + format!("Error while trying to activate versioning for object storage bucket `{}`.", bucket_name,); EngineError::new( event_details, diff --git a/src/events/mod.rs b/src/events/mod.rs index 0ef04c6e..b8c9e2ab 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -458,24 +458,9 @@ mod tests { fn test_event_message() { // setup: let test_cases: Vec<(String, Option, EventMessageVerbosity, String)> = vec![ - ( - "safe".to_string(), - Some("raw".to_string()), - EventMessageVerbosity::SafeOnly, - "safe".to_string(), - ), - ( - "safe".to_string(), - None, - EventMessageVerbosity::SafeOnly, - "safe".to_string(), - ), - ( - "safe".to_string(), - None, - EventMessageVerbosity::FullDetails, - "safe".to_string(), - ), + ("safe".to_string(), Some("raw".to_string()), EventMessageVerbosity::SafeOnly, "safe".to_string()), + ("safe".to_string(), None, EventMessageVerbosity::SafeOnly, "safe".to_string()), + ("safe".to_string(), None, EventMessageVerbosity::FullDetails, "safe".to_string()), ( "safe".to_string(), Some("raw".to_string()), @@ -498,54 +483,21 @@ mod tests { fn test_stage_sub_step_name() { // setup: let test_cases: Vec<(Stage, String)> = vec![ - ( - Stage::Infrastructure(InfrastructureStep::Create), - InfrastructureStep::Create.to_string(), - ), - ( - Stage::Infrastructure(InfrastructureStep::Upgrade), - InfrastructureStep::Upgrade.to_string(), - ), - ( - Stage::Infrastructure(InfrastructureStep::Delete), - 
InfrastructureStep::Delete.to_string(), - ), - ( - Stage::Infrastructure(InfrastructureStep::Resume), - InfrastructureStep::Resume.to_string(), - ), - ( - Stage::Infrastructure(InfrastructureStep::Pause), - InfrastructureStep::Pause.to_string(), - ), + (Stage::Infrastructure(InfrastructureStep::Create), InfrastructureStep::Create.to_string()), + (Stage::Infrastructure(InfrastructureStep::Upgrade), InfrastructureStep::Upgrade.to_string()), + (Stage::Infrastructure(InfrastructureStep::Delete), InfrastructureStep::Delete.to_string()), + (Stage::Infrastructure(InfrastructureStep::Resume), InfrastructureStep::Resume.to_string()), + (Stage::Infrastructure(InfrastructureStep::Pause), InfrastructureStep::Pause.to_string()), ( Stage::Infrastructure(InfrastructureStep::LoadConfiguration), InfrastructureStep::LoadConfiguration.to_string(), ), - ( - Stage::Environment(EnvironmentStep::Pause), - EnvironmentStep::Pause.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Resume), - EnvironmentStep::Resume.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Build), - EnvironmentStep::Build.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Delete), - EnvironmentStep::Delete.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Update), - EnvironmentStep::Update.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Deploy), - EnvironmentStep::Deploy.to_string(), - ), + (Stage::Environment(EnvironmentStep::Pause), EnvironmentStep::Pause.to_string()), + (Stage::Environment(EnvironmentStep::Resume), EnvironmentStep::Resume.to_string()), + (Stage::Environment(EnvironmentStep::Build), EnvironmentStep::Build.to_string()), + (Stage::Environment(EnvironmentStep::Delete), EnvironmentStep::Delete.to_string()), + (Stage::Environment(EnvironmentStep::Update), EnvironmentStep::Update.to_string()), + (Stage::Environment(EnvironmentStep::Deploy), EnvironmentStep::Deploy.to_string()), ]; for tc in test_cases { diff --git a/src/fs.rs b/src/fs.rs index 
43cae13e..e5364cca 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -113,11 +113,7 @@ pub fn cleanup_workspace_directory(working_root_dir: &str, execution_id: &str) - Err(err) => { error!( "{}", - format!( - "error trying to remove workspace directory '{}', error: {}", - workspace_dir.as_str(), - err - ) + format!("error trying to remove workspace directory '{}', error: {}", workspace_dir.as_str(), err) ); Err(err) } @@ -168,11 +164,7 @@ mod tests { // setup: let execution_id: &str = "123"; let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); - let root_dir = format!( - "{}/.qovery-workspace/{}", - tmp_dir.path().to_str().unwrap(), - execution_id - ); + let root_dir = format!("{}/.qovery-workspace/{}", tmp_dir.path().to_str().unwrap(), execution_id); let root_dir_path = Path::new(root_dir.as_str()); let directories_to_create = vec![ @@ -215,10 +207,8 @@ mod tests { .collect::>(); // execute: - let result = archive_workspace_directory( - tmp_dir.path().to_str().expect("error getting file path string"), - execution_id, - ); + let result = + archive_workspace_directory(tmp_dir.path().to_str().expect("error getting file path string"), execution_id); // verify: assert_eq!(true, result.is_ok()); diff --git a/src/git.rs b/src/git.rs index bebf0d19..8831df13 100644 --- a/src/git.rs +++ b/src/git.rs @@ -17,10 +17,7 @@ fn authentication_callback<'a>( move |remote_url, username_from_url, allowed_types| { // If we have changed remote, reset our available auth methods if remote_url != current_credentials.0 { - current_credentials = ( - remote_url.to_string(), - get_credentials(username_from_url.unwrap_or("git")), - ); + current_credentials = (remote_url.to_string(), get_credentials(username_from_url.unwrap_or("git"))); } let auth_methods = &mut current_credentials.1; @@ -52,10 +49,7 @@ fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, .find_remote("origin") .map(|remote| remote.url().unwrap_or_default().to_string()) 
.unwrap_or_default(); - let msg = format!( - "Unable to use git object commit ID {} on repository {}: {}", - &commit_id, &repo_url, &err - ); + let msg = format!("Unable to use git object commit ID {} on repository {}: {}", &commit_id, &repo_url, &err); Error::from_str(&msg) })?; @@ -194,19 +188,11 @@ mod tests { let repo_path = repo_dir.path(); // We only allow https:// at the moment - let repo = clone( - &Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), - &repo_path, - &|_| vec![], - ); + let repo = clone(&Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), &repo_path, &|_| vec![]); assert!(matches!(repo, Err(e) if e.message().contains("https://"))); // Repository must be empty - let repo = clone( - &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), - &repo_path, - &|_| vec![], - ); + let repo = clone(&Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), &repo_path, &|_| vec![]); assert!(repo.is_ok()); // clone makes sure to empty the directory // Working case @@ -224,16 +210,10 @@ mod tests { { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_clone".to_string()); let get_credentials = |_: &str| { - vec![( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext("FAKE", "FAKE").unwrap(), - )] + vec![(CredentialType::USER_PASS_PLAINTEXT, Cred::userpass_plaintext("FAKE", "FAKE").unwrap())] }; - let repo = clone( - &Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), - clone_dir.path(), - &get_credentials, - ); + let repo = + clone(&Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), clone_dir.path(), &get_credentials); assert!(matches!(repo, Err(repo) if repo.message().contains("authentication"))); } @@ -266,12 +246,9 @@ mod tests { #[test] fn test_git_checkout() { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_checkout".to_string()); - let repo = clone( - &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), 
- clone_dir.path(), - &|_| vec![], - ) - .unwrap(); + let repo = + clone(&Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![]) + .unwrap(); // Invalid commit for this repository let check = checkout(&repo, "c2c2101f8e4c4ffadb326dc440ba8afb4aeb1310"); @@ -328,10 +305,7 @@ mod tests { CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &invalid_ssh_key, Some("toto")).unwrap(), ), - ( - CredentialType::SSH_MEMORY, - Cred::ssh_key_from_memory(user, None, &ssh_key, None).unwrap(), - ), + (CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &ssh_key, None).unwrap()), ( CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &invalid_ssh_key, Some("toto")).unwrap(), diff --git a/src/logger.rs b/src/logger.rs index 2d96b8d3..e99ea075 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -129,10 +129,7 @@ mod tests { ), qovery_message.to_string(), user_message.to_string(), - Some(errors::CommandError::new( - safe_message.to_string(), - Some(raw_message.to_string()), - )), + Some(errors::CommandError::new(safe_message.to_string(), Some(raw_message.to_string()))), Some(link), Some(hint.to_string()), ), @@ -208,21 +205,9 @@ mod tests { tc.description ); - assert!( - logs_contain(format!("organization_id=\"{}\"", orga_id.short()).as_str()), - "{}", - tc.description - ); - assert!( - logs_contain(format!("cluster_id=\"{}\"", cluster_id.short()).as_str()), - "{}", - tc.description - ); - assert!( - logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), - "{}", - tc.description - ); + assert!(logs_contain(format!("organization_id=\"{}\"", orga_id.short()).as_str()), "{}", tc.description); + assert!(logs_contain(format!("cluster_id=\"{}\"", cluster_id.short()).as_str()), "{}", tc.description); + assert!(logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), "{}", tc.description); let details = tc.event.get_details(); assert!( @@ -255,21 +240,13 @@ mod tests { tc.description 
); - assert!( - logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), - "{}", - tc.description - ); + assert!(logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), "{}", tc.description); assert!( logs_contain(format!("step=\"{}\"", details.stage().sub_step_name()).as_str()), "{}", tc.description ); - assert!( - logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), - "{}", - tc.description - ); + assert!(logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), "{}", tc.description); // Logger should display everything assert!(logs_contain(safe_message), "{}", tc.description); diff --git a/src/models.rs b/src/models.rs index aa5fa07a..109c410d 100644 --- a/src/models.rs +++ b/src/models.rs @@ -56,10 +56,7 @@ impl QoveryIdentifier { } pub fn new_from_long_id(raw_long_id: String) -> Self { - QoveryIdentifier::new( - raw_long_id.to_string(), - QoveryIdentifier::extract_short(raw_long_id.as_str()), - ) + QoveryIdentifier::new(raw_long_id.to_string(), QoveryIdentifier::extract_short(raw_long_id.as_str())) } pub fn new_random() -> Self { @@ -825,10 +822,7 @@ impl Database { Some(db) } Err(e) => { - error!( - "{}", - format!("error while parsing postgres version, error: {}", e.message()) - ); + error!("{}", format!("error while parsing postgres version, error: {}", e.message())); None } }, @@ -853,10 +847,7 @@ impl Database { Some(db) } Err(e) => { - error!( - "{}", - format!("error while parsing mysql version, error: {}", e.message()) - ); + error!("{}", format!("error while parsing mysql version, error: {}", e.message())); None } }, @@ -1435,16 +1426,8 @@ mod tests { let result = QoveryIdentifier::new_from_long_id(tc.input.clone()); // verify: - assert_eq!( - tc.expected_long_id_output, result.raw_long_id, - "case {} : '{}'", - tc.description, tc.input - ); - assert_eq!( - tc.expected_short_output, result.short, - "case {} : '{}'", - tc.description, tc.input - ); + assert_eq!(tc.expected_long_id_output, 
result.raw_long_id, "case {} : '{}'", tc.description, tc.input); + assert_eq!(tc.expected_short_output, result.short, "case {} : '{}'", tc.description, tc.input); } } } diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 886d962f..5d4fbbcb 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -60,10 +60,8 @@ impl S3 { fn get_s3_client(&self) -> S3Client { let region = RusotoRegion::from_str(&self.region.to_aws_format()) .unwrap_or_else(|_| panic!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format())); - let client = Client::new_with( - self.get_credentials(), - HttpClient::new().expect("unable to create new Http client"), - ); + let client = + Client::new_with(self.get_credentials(), HttpClient::new().expect("unable to create new Http client")); S3Client::new_with_client(client, region) } diff --git a/src/template.rs b/src/template.rs index cd71541a..1cf101fd 100644 --- a/src/template.rs +++ b/src/template.rs @@ -22,10 +22,9 @@ where let error_msg = match e.kind { tera::ErrorKind::TemplateNotFound(x) => format!("template not found: {}", x), tera::ErrorKind::Msg(x) => format!("tera error: {}", x), - tera::ErrorKind::CircularExtend { tpl, inheritance_chain } => format!( - "circular extend - template: {}, inheritance chain: {:?}", - tpl, inheritance_chain - ), + tera::ErrorKind::CircularExtend { tpl, inheritance_chain } => { + format!("circular extend - template: {}, inheritance chain: {:?}", tpl, inheritance_chain) + } tera::ErrorKind::MissingParent { current, parent } => { format!("missing parent - current: {}, parent: {}", current, parent) } diff --git a/src/transaction.rs b/src/transaction.rs index 8e163229..f29f9b82 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -468,12 +468,8 @@ impl<'a> Transaction<'a> { T: Service + ?Sized, { let lh = ListenersHelper::new(kubernetes.listeners()); - let progress_info = ProgressInfo::new( - service.progress_scope(), - ProgressLevel::Info, - None::<&str>, - 
execution_id, - ); + let progress_info = + ProgressInfo::new(service.progress_scope(), ProgressLevel::Info, None::<&str>, execution_id); if !is_error { match action { @@ -511,23 +507,11 @@ impl<'a> Transaction<'a> { // !!! don't change the order // terminal update for service in environment.stateful_services() { - send_progress( - self.engine.kubernetes(), - &environment.action, - service, - execution_id, - true, - ); + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, true); } for service in environment.stateless_services() { - send_progress( - self.engine.kubernetes(), - &environment.action, - service, - execution_id, - true, - ); + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, true); } return rollback_result; @@ -535,23 +519,11 @@ impl<'a> Transaction<'a> { _ => { // terminal update for service in environment.stateful_services() { - send_progress( - self.engine.kubernetes(), - &environment.action, - service, - execution_id, - false, - ); + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, false); } for service in environment.stateless_services() { - send_progress( - self.engine.kubernetes(), - &environment.action, - service, - execution_id, - false, - ); + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, false); } } }; diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 41f59b17..d2eae14f 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -93,14 +93,7 @@ impl Cluster for AWS { vpc_network_mode, ); - EngineConfig::new( - context.clone(), - build_platform, - container_registry, - cloud_provider, - dns_provider, - k, - ) + EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, k) } fn cloud_provider(context: &Context) -> Box { diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 176352dc..64042fb1 100644 --- 
a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -469,12 +469,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); let router_id = generate_id(); let router_name = "main".to_string(); - let application_domain = format!( - "{}.{}.{}", - application_id, - context.cluster_id().to_string(), - test_domain - ); + let application_domain = format!("{}.{}.{}", application_id, context.cluster_id().to_string(), test_domain); EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), @@ -1129,12 +1124,7 @@ pub fn test_db( match database_mode.clone() { DatabaseMode::CONTAINER => { - match get_pvc( - context.clone(), - provider_kind.clone(), - environment.clone(), - secrets.clone(), - ) { + match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -1142,12 +1132,7 @@ pub fn test_db( Err(_) => assert!(false), }; - match get_svc( - context.clone(), - provider_kind.clone(), - environment.clone(), - secrets.clone(), - ) { + match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(svc) => assert_eq!( svc.items .expect("No items in svc") @@ -1610,9 +1595,7 @@ where .expect("No hpa condition.") .contains("ValidMetricFound") { - return Err(CommandError::new_from_safe_message( - "Metrics server doesn't work".to_string(), - )); + return Err(CommandError::new_from_safe_message("Metrics server doesn't work".to_string())); } } Ok(()) diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index e5ac3dc1..8339566b 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -33,13 +33,7 @@ pub const DO_SELF_HOSTED_DATABASE_DISK_TYPE: &str = "do-block-storage"; pub fn 
container_registry_digital_ocean(context: &Context) -> DOCR { let secrets = FuncTestsSecrets::new(); - DOCR::new( - context.clone(), - DOCR_ID, - DOCR_ID, - secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), - ) - .unwrap() + DOCR::new(context.clone(), DOCR_ID, DOCR_ID, secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str()).unwrap() } pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { @@ -82,14 +76,7 @@ impl Cluster for DO { vpc_network_mode, ); - EngineConfig::new( - context.clone(), - build_platform, - container_registry, - cloud_provider, - dns_provider, - k, - ) + EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, k) } fn cloud_provider(context: &Context) -> Box { diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 1941a90b..31da6441 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -104,14 +104,7 @@ impl Cluster for Scaleway { vpc_network_mode, ); - EngineConfig::new( - context.clone(), - build_platform, - container_registry, - cloud_provider, - dns_provider, - cluster, - ) + EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, cluster) } fn cloud_provider(context: &Context) -> Box { @@ -229,14 +222,8 @@ pub fn clean_environments( let secret_token = secrets.SCALEWAY_SECRET_KEY.unwrap(); let project_id = secrets.SCALEWAY_DEFAULT_PROJECT_ID.unwrap(); - let container_registry_client = ScalewayCR::new( - context.clone(), - "test", - "test", - secret_token.as_str(), - project_id.as_str(), - zone, - )?; + let container_registry_client = + ScalewayCR::new(context.clone(), "test", "test", secret_token.as_str(), project_id.as_str(), zone)?; // delete images created in registry let registry_url = container_registry_client.registry_info(); diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index b6b36685..42b6d3ac 100644 --- a/test_utilities/src/utilities.rs +++ 
b/test_utilities/src/utilities.rs @@ -169,21 +169,13 @@ impl FuncTestsSecrets { fn get_vault_config() -> Result { let vault_addr = match env::var_os("VAULT_ADDR") { Some(x) => x.into_string().unwrap(), - None => { - return Err(Error::new( - ErrorKind::NotFound, - format!("VAULT_ADDR environment variable is missing"), - )) - } + None => return Err(Error::new(ErrorKind::NotFound, format!("VAULT_ADDR environment variable is missing"))), }; let vault_token = match env::var_os("VAULT_TOKEN") { Some(x) => x.into_string().unwrap(), None => { - return Err(Error::new( - ErrorKind::NotFound, - format!("VAULT_TOKEN environment variable is missing"), - )) + return Err(Error::new(ErrorKind::NotFound, format!("VAULT_TOKEN environment variable is missing"))) } }; @@ -378,10 +370,7 @@ pub fn init() -> Instant { None => tracing_subscriber::fmt().try_init(), }; - info!( - "running from current directory: {}", - std::env::current_dir().unwrap().to_str().unwrap() - ); + info!("running from current directory: {}", std::env::current_dir().unwrap().to_str().unwrap()); Instant::now() } @@ -457,10 +446,7 @@ pub fn check_all_connections(env: &EnvironmentRequest) -> Vec { let mut checking: Vec = Vec::with_capacity(env.routers.len()); for router_to_test in &env.routers { - let path_to_test = format!( - "https://{}{}", - &router_to_test.default_domain, &router_to_test.routes[0].path - ); + let path_to_test = format!("https://{}{}", &router_to_test.default_domain, &router_to_test.routes[0].path); checking.push(curl_path(path_to_test.as_str())); } @@ -637,9 +623,7 @@ where } } - Err(CommandError::new_from_safe_message( - "Test cluster not found".to_string(), - )) + Err(CommandError::new_from_safe_message("Test cluster not found".to_string())) } }; @@ -689,10 +673,7 @@ fn get_cloud_provider_credentials(provider_kind: Kind, secrets: &FuncTestsSecret match provider_kind { Kind::Aws => vec![ (AWS_ACCESS_KEY_ID, secrets.AWS_ACCESS_KEY_ID.as_ref().unwrap().as_str()), - ( - AWS_SECRET_ACCESS_KEY, - 
secrets.AWS_SECRET_ACCESS_KEY.as_ref().unwrap().as_str(), - ), + (AWS_SECRET_ACCESS_KEY, secrets.AWS_SECRET_ACCESS_KEY.as_ref().unwrap().as_str()), ], Kind::Do => vec![ ( @@ -718,18 +699,9 @@ fn get_cloud_provider_credentials(provider_kind: Kind, secrets: &FuncTestsSecret ), ], Kind::Scw => vec![ - ( - SCALEWAY_ACCESS_KEY, - secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str(), - ), - ( - SCALEWAY_SECRET_KEY, - secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str(), - ), - ( - SCALEWAY_DEFAULT_PROJECT_ID, - secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str(), - ), + (SCALEWAY_ACCESS_KEY, secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str()), + (SCALEWAY_SECRET_KEY, secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str()), + (SCALEWAY_DEFAULT_PROJECT_ID, secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str()), ], } } @@ -770,11 +742,7 @@ pub fn is_pod_restarted_env( pod_to_check: &str, secrets: FuncTestsSecrets, ) -> (bool, String) { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -805,11 +773,7 @@ pub fn get_pods( pod_to_check: &str, secrets: FuncTestsSecrets, ) -> Result, CommandError> { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -877,11 +841,7 @@ pub fn get_pvc( environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); 
+ let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -906,11 +866,7 @@ pub fn get_svc( environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 5e7a8d51..37fb5292 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -330,10 +330,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); - assert!(matches!( - ret, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); test_name.to_string() }) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 21ed7fca..40fe92ce 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -117,13 +117,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Aws, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -131,13 +125,7 @@ fn deploy_a_working_environment_and_pause_it_eks() 
{ assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods( - context.clone(), - Kind::Aws, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -182,13 +170,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods( - context, - Kind::Aws, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -286,10 +268,7 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); - assert!(matches!( - ret, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); test_name.to_string() }) @@ -647,13 +626,8 @@ fn redeploy_same_app_with_ebs() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = is_pod_restarted_env( - context.clone(), - Kind::Aws, - environment_check1, - app_name.as_str(), - secrets.clone(), - ); + let (_, number) = + is_pod_restarted_env(context.clone(), Kind::Aws, environment_check1, app_name.as_str(), secrets.clone()); let ret = environment_redeploy.deploy_environment(&ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); @@ -822,18 +796,12 @@ fn deploy_ok_fail_fail_ok_environment() { 
// FAIL and rollback let ret = not_working_env_1.deploy_environment(&ea_not_working_1, logger.clone(), &engine_config_for_not_working_1); - assert!(matches!( - ret, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // FAIL and Rollback again let ret = not_working_env_2.deploy_environment(&ea_not_working_2, logger.clone(), &engine_config_for_not_working_2); - assert!(matches!( - ret, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // Should be working let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index 53f790b8..4e0d67bc 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -25,10 +25,7 @@ fn create_and_destroy_eks_cluster( cluster_test( test_name, Kind::Aws, - context( - generate_id().as_str(), - generate_cluster_id(region.to_string().as_str()).as_str(), - ), + context(generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str()), logger(), region.to_aws_format().as_str(), Some(zones), diff --git a/tests/aws/aws_s3.rs b/tests/aws/aws_s3.rs index 1616762f..fb0b699f 100644 --- a/tests/aws/aws_s3.rs +++ b/tests/aws/aws_s3.rs @@ -40,11 +40,7 @@ fn test_delete_bucket() { let result = aws_os.delete_bucket(bucket_name.as_str()); // validate: - assert!( - result.is_ok(), - "Delete bucket failed in `{}`", - aws_region.to_aws_format() - ); + assert!(result.is_ok(), "Delete bucket failed in `{}`", aws_region.to_aws_format()); assert!( !aws_os.bucket_exists(bucket_name.as_str()), "Delete bucket failed in `{}`, bucket still exists", @@ -83,11 +79,7 @@ fn test_create_bucket() { let result = aws_os.create_bucket(bucket_name.as_str()); // validate: - assert!( 
- result.is_ok(), - "Create bucket failed in `{}`", - aws_region.to_aws_format() - ); + assert!(result.is_ok(), "Create bucket failed in `{}`", aws_region.to_aws_format()); assert!( aws_os.bucket_exists(bucket_name.as_str()), "Create bucket failed in `{}`, bucket doesn't exist", @@ -95,13 +87,9 @@ fn test_create_bucket() { ); // clean-up: - aws_os.delete_bucket(bucket_name.as_str()).unwrap_or_else(|_| { - panic!( - "error deleting S3 bucket `{}` in `{}`", - bucket_name, - aws_region.to_aws_format() - ) - }); + aws_os + .delete_bucket(bucket_name.as_str()) + .unwrap_or_else(|_| panic!("error deleting S3 bucket `{}` in `{}`", bucket_name, aws_region.to_aws_format())); } #[cfg(feature = "test-aws-infra")] @@ -184,11 +172,7 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = aws_os.put( - bucket_name.as_str(), - object_key.as_str(), - temp_file.into_temp_path().to_str().unwrap(), - ); + let result = aws_os.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); // validate: assert!(result.is_ok()); diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 5707d8a9..0341671c 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -131,13 +131,7 @@ fn deploy_an_environment_with_db_and_pause_it() { // Check that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods( - context.clone(), - ProviderKind::Do, - environment.clone(), - app_name.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), ProviderKind::Do, environment.clone(), app_name.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -220,12 +214,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert!(matches!(ret, TransactionResult::Ok)); // 
delete images created during test from registries - if let Err(e) = clean_environments( - &context, - vec![environment, environment_delete], - secrets.clone(), - DO_TEST_REGION, - ) { + if let Err(e) = + clean_environments(&context, vec![environment, environment_delete], secrets.clone(), DO_TEST_REGION) + { warn!("cannot clean environments, error: {:?}", e); } @@ -371,10 +362,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); - assert!(matches!( - ret, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); // delete images created during test from registries if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index b3df0295..28eee4e5 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -125,10 +125,7 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let result = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); - assert!(matches!( - result, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); @@ -179,13 +176,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Do, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + 
let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -193,13 +184,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods( - context.clone(), - Kind::Do, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -209,13 +194,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Do, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -518,24 +497,14 @@ fn digitalocean_doks_redeploy_same_app() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = is_pod_restarted_env( - context.clone(), - Kind::Do, - environment_check1, - app_name.as_str(), - secrets.clone(), - ); + let (_, number) = + is_pod_restarted_env(context.clone(), Kind::Do, environment_check1, app_name.as_str(), secrets.clone()); let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); - let (_, number2) = is_pod_restarted_env( - context.clone(), - Kind::Do, - environment_check2, - app_name.as_str(), - secrets.clone(), - ); + let (_, number2) = + is_pod_restarted_env(context.clone(), Kind::Do, 
environment_check2, app_name.as_str(), secrets.clone()); // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); @@ -708,10 +677,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_1, ); - assert!(matches!( - result, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // FAIL and Rollback again let result = not_working_env_2.deploy_environment( @@ -719,10 +685,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_2, ); - assert!(matches!( - result, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // Should be working let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index 2ab58577..54f853b1 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -170,18 +170,11 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = spaces.put( - bucket_name.as_str(), - object_key.as_str(), - temp_file.into_temp_path().to_str().unwrap(), - ); + let result = spaces.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); // validate: assert!(result.is_ok()); - assert_eq!( - true, - spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok() - ); + assert_eq!(true, spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok()); // clean-up: spaces @@ -226,10 +219,7 @@ fn test_get_file() { // validate: assert!(result.is_ok()); - assert_eq!( - true, - spaces.get(bucket_name.as_str(), 
object_key.as_str(), false).is_ok() - ); + assert_eq!(true, spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok()); // clean-up: spaces diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index 9b38fb54..aef046c2 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -63,10 +63,7 @@ fn test_get_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); // clean-up: container_registry.delete_registry_namespace(&image).unwrap(); @@ -194,10 +191,7 @@ fn test_get_or_create_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); let added_registry_result = container_registry.get_registry_namespace(&image); assert_eq!(true, added_registry_result.is_some()); @@ -215,10 +209,7 @@ fn test_get_or_create_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); let added_registry_result = container_registry.get_registry_namespace(&image); assert_eq!(true, added_registry_result.is_some()); diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 0cce29ee..5bed5672 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -137,13 +137,7 @@ fn deploy_an_environment_with_db_and_pause_it() { // Check 
that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods( - context.clone(), - ProviderKind::Scw, - environment.clone(), - app_name.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), ProviderKind::Scw, environment.clone(), app_name.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -222,12 +216,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments( - &context, - vec![environment, environment_delete], - secrets.clone(), - SCW_TEST_ZONE, - ) { + if let Err(e) = + clean_environments(&context, vec![environment, environment_delete], secrets.clone(), SCW_TEST_ZONE) + { warn!("cannot clean environments, error: {:?}", e); } @@ -376,10 +367,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); - assert!(matches!( - result, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); // delete images created during test from registries if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 68114006..791bc5d7 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -129,10 +129,7 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let result = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); - assert!(matches!( - result, - TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) - 
)); + assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); @@ -185,13 +182,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -199,13 +190,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -215,13 +200,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -496,13 +475,7 @@ fn deploy_a_working_environment_and_pause_it() { let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(result, 
TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -510,13 +483,7 @@ fn deploy_a_working_environment_and_pause_it() { assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -616,24 +583,14 @@ fn scaleway_kapsule_redeploy_same_app() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = is_pod_restarted_env( - context.clone(), - Kind::Scw, - environment_check1, - app_name.as_str(), - secrets.clone(), - ); + let (_, number) = + is_pod_restarted_env(context.clone(), Kind::Scw, environment_check1, app_name.as_str(), secrets.clone()); let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); - let (_, number2) = is_pod_restarted_env( - context.clone(), - Kind::Scw, - environment_check2, - app_name.as_str(), - secrets.clone(), - ); + let (_, number2) = + is_pod_restarted_env(context.clone(), Kind::Scw, environment_check2, app_name.as_str(), secrets.clone()); // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); @@ -813,10 +770,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_1, ); - assert!(matches!( - result, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) 
- )); + assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // FAIL and Rollback again let result = not_working_env_2.deploy_environment( @@ -824,10 +778,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_2, ); - assert!(matches!( - result, - TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) - )); + assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); // Should be working let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); diff --git a/tests/scaleway/scw_object_storage.rs b/tests/scaleway/scw_object_storage.rs index 35d232af..632e0aaa 100644 --- a/tests/scaleway/scw_object_storage.rs +++ b/tests/scaleway/scw_object_storage.rs @@ -191,11 +191,8 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = scaleway_os.put( - bucket_name.as_str(), - object_key.as_str(), - temp_file.into_temp_path().to_str().unwrap(), - ); + let result = + scaleway_os.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); // validate: assert!(result.is_ok()); From b26fb355f603920e0f1188403124adc6fa750180 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 14:45:19 +0100 Subject: [PATCH 68/85] fmt --- rustfmt.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rustfmt.toml b/rustfmt.toml index 62a48d97..c82b93f5 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1,4 @@ max_width = 120 fn_call_width = 100 -#attr_fn_like_width = 80 +attr_fn_like_width = 100 edition = "2018" From fc584141e66bbec63b09c8f58cc9a44253de3777 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 14:48:06 +0100 Subject: [PATCH 69/85] fmt --- rustfmt.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/rustfmt.toml b/rustfmt.toml index c82b93f5..d5057429 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1,5 @@ +edition = "2018" max_width = 120 fn_call_width = 100 attr_fn_like_width = 100 -edition = "2018" +use_field_init_shorthand = true From 603c5c3ff666e428aec27ac28a53d80a4b98dfcb Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 14:50:27 +0100 Subject: [PATCH 70/85] fmt --- rustfmt.toml | 4 +- src/build_platform/local_docker.rs | 34 +++- src/build_platform/mod.rs | 7 +- .../aws/kubernetes/helm_charts.rs | 15 +- src/cloud_provider/aws/kubernetes/mod.rs | 91 ++++++--- src/cloud_provider/aws/kubernetes/roles.rs | 5 +- src/cloud_provider/aws/regions.rs | 5 +- .../digitalocean/databases/mongodb.rs | 7 +- .../digitalocean/databases/mysql.rs | 7 +- .../digitalocean/databases/postgresql.rs | 7 +- .../digitalocean/databases/redis.rs | 7 +- .../digitalocean/do_api_common.rs | 6 +- .../digitalocean/kubernetes/doks_api.rs | 10 +- .../digitalocean/kubernetes/helm_charts.rs | 10 +- .../digitalocean/kubernetes/mod.rs | 82 ++++++--- .../digitalocean/network/vpc.rs | 18 +- src/cloud_provider/helm.rs | 29 ++- src/cloud_provider/kubernetes.rs | 73 +++++--- src/cloud_provider/qovery.rs | 15 +- .../scaleway/databases/mongodb.rs | 7 +- .../scaleway/databases/redis.rs | 7 +- .../scaleway/kubernetes/helm_charts.rs | 10 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 105 +++++++---- src/cloud_provider/service.rs | 51 ++++-- src/cloud_provider/utilities.rs | 47 +++-- src/cmd/command.rs | 11 +- src/cmd/docker.rs | 33 +++- src/cmd/helm.rs | 9 +- src/cmd/kubectl.rs | 34 +++- src/cmd/structs.rs | 10 +- src/cmd/terraform.rs | 9 +- src/container_registry/docr.rs | 7 +- src/dns_provider/cloudflare.rs | 4 +- src/error.rs | 6 +- src/errors/mod.rs | 173 +++++++++++++++--- src/events/mod.rs | 32 +++- src/fs.rs | 6 +- src/git.rs | 43 ++++- src/logger.rs | 35 +++- src/models.rs | 12 +- src/object_storage/s3.rs | 6 +- src/template.rs | 5 +- test_utilities/src/aws.rs | 9 +- 
test_utilities/src/digitalocean.rs | 9 +- test_utilities/src/scaleway.rs | 19 +- test_utilities/src/utilities.rs | 22 ++- tests/aws/aws_databases.rs | 5 +- tests/aws/aws_environment.rs | 40 +++- tests/aws/aws_kubernetes.rs | 5 +- tests/aws/aws_s3.rs | 6 +- tests/digitalocean/do_databases.rs | 13 +- tests/digitalocean/do_environment.rs | 57 +++++- tests/digitalocean/do_spaces.rs | 6 +- tests/scaleway/scw_databases.rs | 13 +- tests/scaleway/scw_environment.rs | 73 ++++++-- tests/scaleway/scw_object_storage.rs | 7 +- 56 files changed, 1067 insertions(+), 311 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index d5057429..abb6a864 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,5 +1,5 @@ edition = "2018" max_width = 120 -fn_call_width = 100 -attr_fn_like_width = 100 +fn_call_width = 80 +attr_fn_like_width = 80 use_field_init_shorthand = true diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 4c6d00b2..e627f123 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -131,7 +131,11 @@ impl LocalDocker { // Going to inject only env var that are used by the dockerfile // so extracting it and modifying the image tag and env variables let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { - BuildError::IoError(build.image.application_id.clone(), "reading dockerfile content".to_string(), err) + BuildError::IoError( + build.image.application_id.clone(), + "reading dockerfile content".to_string(), + err, + ) })?; let dockerfile_args = match extract_dockerfile_args(dockerfile_content) { Ok(dockerfile_args) => dockerfile_args, @@ -324,7 +328,11 @@ impl LocalDocker { format!("build/{}", build.image.name.as_str()), ) .map_err(|err| { - BuildError::IoError(build.image.application_id.clone(), "when creating build workspace".to_string(), err) + BuildError::IoError( + build.image.application_id.clone(), + "when creating build workspace".to_string(), + err, + ) }) } } @@ -358,15 
+366,20 @@ impl BuildPlatform for LocalDocker { // LOGGING let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); - let msg = format!("📥 Cloning repository: {} to {:?}", build.git_repository.url, repository_root_path); + let msg = format!( + "📥 Cloning repository: {} to {:?}", + build.git_repository.url, repository_root_path + ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: app_id.clone() }, ProgressLevel::Info, Some(msg.clone()), self.context.execution_id(), )); - self.logger - .log(LogLevel::Info, EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))); + self.logger.log( + LogLevel::Info, + EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), + ); // LOGGING // Create callback that will be called by git to provide credentials per user @@ -382,7 +395,10 @@ impl BuildPlatform for LocalDocker { } if let Some(Credentials { login, password }) = &build.git_repository.credentials { - creds.push((CredentialType::USER_PASS_PLAINTEXT, Cred::userpass_plaintext(login, password).unwrap())); + creds.push(( + CredentialType::USER_PASS_PLAINTEXT, + Cred::userpass_plaintext(login, password).unwrap(), + )); } creds @@ -448,8 +464,10 @@ impl BuildPlatform for LocalDocker { // If the dockerfile does not exist, abort if !dockerfile_absolute_path.is_file() { - let msg = - format!("Specified dockerfile path {:?} does not exist within the repository", &dockerfile_path); + let msg = format!( + "Specified dockerfile path {:?} does not exist within the repository", + &dockerfile_path + ); return Err(BuildError::InvalidConfig(app_id, msg)); } diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index eeedb4a1..be1ce938 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -138,7 +138,12 @@ impl Image { &self.repository_name } pub fn full_image_name_with_tag(&self) -> String { - format!("{}/{}:{}", self.registry_url.host_str().unwrap_or_default(), 
self.name, self.tag) + format!( + "{}/{}:{}", + self.registry_url.host_str().unwrap_or_default(), + self.name, + self.tag + ) } pub fn full_image_name(&self) -> String { diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 6e9f7b71..081a9c7d 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -67,7 +67,10 @@ pub fn aws_helm_charts( Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe.to_string()), + )); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -77,7 +80,10 @@ pub fn aws_helm_charts( Ok(config) => config, Err(e) => { let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )); } }; @@ -1348,7 +1354,10 @@ impl AwsVpcCniChart { "Error while getting daemonset info for chart {}, won't deploy CNI chart.", &self.chart_info.name ); - Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))) + Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )) } } } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 11676b2f..12243714 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -391,7 +391,10 @@ impl EKS { // Qovery features context.insert("log_history_enabled", 
&self.context.is_feature_enabled(&Features::LogsHistory)); - context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); + context.insert( + "metrics_history_enabled", + &self.context.is_feature_enabled(&Features::MetricsHistory), + ); // DNS configuration let managed_dns_list = vec![self.dns_provider.name()]; @@ -406,8 +409,14 @@ impl EKS { context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); - context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); - context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); + context.insert( + "managed_dns_domains_root_terraform_format", + &managed_dns_domains_root_terraform_format, + ); + context.insert( + "managed_dns_resolvers_terraform_format", + &managed_dns_resolvers_terraform_format, + ); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { @@ -478,8 +487,10 @@ impl EKS { .secret_access_key .as_str(), ); - context - .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); + context.insert( + "aws_region_tfstates_account", + self.cloud_provider().terraform_state_credentials().region.as_str(), + ); context.insert("aws_region", &self.region()); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); @@ -784,9 +795,10 @@ impl EKS { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self - .logger() - .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None))), + Ok(ok_line) => self.logger().log( + LogLevel::Info, + EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), + ), Err(err) => 
self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -984,8 +996,10 @@ impl EKS { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), + ); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), @@ -1072,11 +1086,16 @@ impl EKS { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = - format!("Ensuring everything is up to date before deleting cluster {}/{}", self.name(), self.id()); + let message = format!( + "Ensuring everything is up to date before deleting cluster {}/{}", + self.name(), + self.id() + ); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -1161,8 +1180,10 @@ impl EKS { } } Err(e) => { - let message_safe = - format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); + let message_safe = format!( + "Error while getting all namespaces for Kubernetes cluster {}", + self.name_with_id(), + ); self.logger().log( LogLevel::Error, EngineEvent::Deleting( @@ -1185,9 +1206,11 @@ impl EKS { ); // delete custom metrics api to avoid stale namespaces on deletion - let helm = - Helm::new(&kubernetes_config_file_path, &self.cloud_provider.credentials_environment_variables()) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = Helm::new( + 
&kubernetes_config_file_path, + &self.cloud_provider.credentials_environment_variables(), + ) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; @@ -1325,8 +1348,10 @@ impl EKS { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -1356,9 +1381,10 @@ impl EKS { ); Ok(()) } - Err(Operation { error, .. }) => { - Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) - } + Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + error, + )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1473,7 +1499,12 @@ impl Kubernetes for EKS { let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!("Start preparing EKS upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), + format!( + "Start preparing EKS upgrade process {} cluster with id {}", + self.name(), + self.id() + ) + .as_str(), &listeners_helper, ); self.logger().log( @@ -1631,7 +1662,10 @@ impl Kubernetes for EKS { // disable cluster autoscaler to avoid interfering with AWS upgrade procedure context.insert("enable_cluster_autoscaler", &false); - context.insert("eks_workers_version", format!("{}", &kubernetes_upgrade_status.requested_version).as_str()); + context.insert( + "eks_workers_version", + format!("{}", 
&kubernetes_upgrade_status.requested_version).as_str(), + ); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1661,7 +1695,10 @@ impl Kubernetes for EKS { )); } - self.send_to_customer(format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), &listeners_helper); + self.send_to_customer( + format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), + &listeners_helper, + ); self.logger().log( LogLevel::Info, EngineEvent::Deploying( diff --git a/src/cloud_provider/aws/kubernetes/roles.rs b/src/cloud_provider/aws/kubernetes/roles.rs index 99439ae3..e4593425 100644 --- a/src/cloud_provider/aws/kubernetes/roles.rs +++ b/src/cloud_provider/aws/kubernetes/roles.rs @@ -79,7 +79,10 @@ impl Role { Ok(_) => Ok(true), Err(e) => { let safe_message = format!("Unable to know if `{}` exist on AWS Account", &self.role_name); - return Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message))); + return Err(CommandError::new( + format!("{}, error: {:?}", safe_message, e), + Some(safe_message), + )); } }; } diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index 8a942c26..3ede17b0 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -429,7 +429,10 @@ mod tests { let current_zone = AwsZones::from_string(sanitized_zone.to_lowercase()); assert_eq!(current_zone.unwrap(), zone); } - assert_eq!(AwsZones::from_string("eu-west-3x".to_string()), Err(RegionAndZoneErrors::ZoneNotSupported)); + assert_eq!( + AwsZones::from_string("eu-west-3x".to_string()), + Err(RegionAndZoneErrors::ZoneNotSupported) + ); } #[test] diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index a526f2bf..61c68859 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -66,7 +66,12 @@ impl MongoDo { } 
fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_mongodb_version(self.version()), + self, + event_details, + self.logger(), + ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index f49b78a0..b6d19aad 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -66,7 +66,12 @@ impl MySQLDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_mysql_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_mysql_version(self.version()), + self, + event_details, + self.logger(), + ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index 44b0309f..db1837d1 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -66,7 +66,12 @@ impl PostgresDo { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_postgres_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_postgres_version(self.version()), + self, + event_details, + self.logger(), + ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 3437f4a5..0ae77e6f 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -66,7 +66,12 @@ impl RedisDo { } fn matching_correct_version(&self, 
event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_redis_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_redis_version(self.version()), + self, + event_details, + self.logger(), + ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/digitalocean/do_api_common.rs b/src/cloud_provider/digitalocean/do_api_common.rs index 296f9202..69471800 100644 --- a/src/cloud_provider/digitalocean/do_api_common.rs +++ b/src/cloud_provider/digitalocean/do_api_common.rs @@ -37,8 +37,10 @@ pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Res match response.status() { StatusCode::OK => Ok(response.text().expect("Cannot get response text")), StatusCode::UNAUTHORIZED => { - let message_safe = - format!("Could not get {} information, ensure your DigitalOcean token is valid.", api_type); + let message_safe = format!( + "Could not get {} information, ensure your DigitalOcean token is valid.", + api_type + ); return Err(CommandError::new( format!("{}, response: {:?}", message_safe, response), Some(message_safe), diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index c87b3f1e..3f393de8 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -26,7 +26,10 @@ pub fn get_doks_info_from_name( } Err(e) => { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; - return Err(CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()))); + return Err(CommandError::new( + format!("{}, error: {}", safe_message, e), + Some(safe_message.to_string()), + )); } } } @@ -47,7 +50,10 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result Ok(options.options.versions), Err(e) => { let safe_message = "Error while trying to deserialize json 
received from Digital Ocean DOKS API"; - return Err(CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()))); + return Err(CommandError::new( + format!("{}, error: {}", safe_message, e), + Some(safe_message.to_string()), + )); } } } diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 14024738..4bac2c68 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -121,7 +121,10 @@ pub fn do_helm_charts( Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe.to_string()), + )); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -131,7 +134,10 @@ pub fn do_helm_charts( Ok(config) => config, Err(e) => { let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )); } }; diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 73b93782..85256d82 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -248,8 +248,14 @@ impl DOKS { context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); context.insert("managed_dns_domains_terraform_format", 
&managed_dns_domains_terraform_format); - context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); - context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); + context.insert( + "managed_dns_domains_root_terraform_format", + &managed_dns_domains_root_terraform_format, + ); + context.insert( + "managed_dns_resolvers_terraform_format", + &managed_dns_resolvers_terraform_format, + ); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); @@ -290,7 +296,10 @@ impl DOKS { // Qovery features context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); - context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); + context.insert( + "metrics_history_enabled", + &self.context.is_feature_enabled(&Features::MetricsHistory), + ); if self.context.resource_expiration_in_seconds().is_some() { context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } @@ -320,8 +329,10 @@ impl DOKS { .as_str(), ); - context - .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); + context.insert( + "aws_region_tfstates_account", + self.cloud_provider().terraform_state_credentials().region.as_str(), + ); context.insert("nginx_enable_horizontal_autoscaler", "true"); context.insert("nginx_minimum_replicas", "2"); @@ -427,7 +438,9 @@ impl DOKS { // TODO(benjaminch): `qovery-` to be added into Rust name directly everywhere match get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id())) { Ok(cluster_result) => match cluster_result { - None => Err(CommandError::new_from_safe_message("Cluster doesn't exist on DO side.".to_string())), + None => Err(CommandError::new_from_safe_message( + "Cluster doesn't exist on DO side.".to_string(), 
+ )), Some(cluster) => Ok(cluster), }, Err(e) => Err(e), @@ -443,7 +456,11 @@ impl DOKS { execution_id: self.context.execution_id().to_string(), }, ProgressLevel::Info, - Some(format!("start to create Digital Ocean Kubernetes cluster {} with id {}", self.name(), self.id())), + Some(format!( + "start to create Digital Ocean Kubernetes cluster {} with id {}", + self.name(), + self.id() + )), self.context.execution_id(), )); self.logger().log( @@ -753,9 +770,10 @@ impl DOKS { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self - .logger() - .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None))), + Ok(ok_line) => self.logger().log( + LogLevel::Info, + EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), + ), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -866,11 +884,16 @@ impl DOKS { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = - format!("Ensuring everything is up to date before deleting cluster {}/{}", self.name(), self.id()); + let message = format!( + "Ensuring everything is up to date before deleting cluster {}/{}", + self.name(), + self.id() + ); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -958,8 +981,10 @@ impl DOKS { } } Err(e) => { - let message_safe = - format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); + let message_safe = format!( + "Error while getting all namespaces for Kubernetes cluster {}", + self.name_with_id(), + ); self.logger().log( LogLevel::Error, 
EngineEvent::Deleting( @@ -1121,8 +1146,10 @@ impl DOKS { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -1152,9 +1179,10 @@ impl DOKS { ); Ok(()) } - Err(Operation { error, .. }) => { - Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) - } + Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + error, + )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1391,7 +1419,12 @@ impl Kubernetes for DOKS { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!("Start preparing DOKS upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), + format!( + "Start preparing DOKS upgrade process {} cluster with id {}", + self.name(), + self.id() + ) + .as_str(), &listeners_helper, ); self.logger().log( @@ -1482,7 +1515,10 @@ impl Kubernetes for DOKS { )); } - self.send_to_customer(format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper); + self.send_to_customer( + format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), + &listeners_helper, + ); self.logger().log( LogLevel::Info, EngineEvent::Deploying( diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 3ac4931e..563b9e26 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ 
b/src/cloud_provider/digitalocean/network/vpc.rs @@ -128,7 +128,10 @@ fn do_get_vpcs_from_api_output(json_content: &str) -> Result, CommandEr Ok(vpcs) => Ok(vpcs.vpcs), Err(e) => { let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API"; - Err(CommandError::new(format!("{}, error: {}", message_safe, e), Some(message_safe.to_string()))) + Err(CommandError::new( + format!("{}, error: {}", message_safe, e), + Some(message_safe.to_string()), + )) } } } @@ -255,7 +258,10 @@ mod tests_do_vpcs { let vpc_subnets: Vec = vpcs.into_iter().map(|x| x.ip_range).collect(); let joined_subnets = vpc_subnets.join(","); - assert_eq!(joined_subnets, "10.2.0.0/16,10.110.0.0/20,10.116.0.0/20,10.1.0.0/16,10.0.0.0/16"); + assert_eq!( + joined_subnets, + "10.2.0.0/16,10.110.0.0/20,10.116.0.0/20,10.1.0.0/16,10.0.0.0/16" + ); } #[test] @@ -264,9 +270,11 @@ mod tests_do_vpcs { let vpcs = do_get_vpcs_from_api_output(&json_content).unwrap(); // available - assert!(get_do_vpc_from_subnet("10.3.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) - .unwrap() - .is_none()); + assert!( + get_do_vpc_from_subnet("10.3.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) + .unwrap() + .is_none() + ); // already used assert_eq!( get_do_vpc_from_subnet("10.2.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt) diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 41ddd4dd..fb3be15b 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -157,7 +157,10 @@ pub trait HelmChart: Send { if let Err(e) = fs::metadata(file) { let safe_message = format!("Can't access helm chart override file `{}` for chart `{}`", file, chart.name,); - return Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message))); + return Err(CommandError::new( + format!("{}, error: {:?}", safe_message, e), + Some(safe_message), + )); } } Ok(None) @@ -224,7 +227,10 @@ pub trait HelmChart: Send { match chart_info.action { 
HelmAction::Deploy => { if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { - warn!("error while trying to destroy chart if breaking change is detected: {:?}", e.to_string()); + warn!( + "error while trying to destroy chart if breaking change is detected: {:?}", + e.to_string() + ); } helm.upgrade(chart_info, &[]).map_err(to_command_error)?; @@ -299,8 +305,10 @@ fn deploy_parallel_charts( } Err(e) => { let safe_message = "Thread panicked during parallel charts deployments."; - let error = - Err(CommandError::new(format!("{}, error: {:?}", safe_message, e), Some(safe_message.to_string()))); + let error = Err(CommandError::new( + format!("{}, error: {:?}", safe_message, e), + Some(safe_message.to_string()), + )); errors.push(error); } } @@ -587,7 +595,10 @@ impl HelmChart for PrometheusOperatorConfigChart { match chart_info.action { HelmAction::Deploy => { if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { - warn!("error while trying to destroy chart if breaking change is detected: {}", e.to_string()); + warn!( + "error while trying to destroy chart if breaking change is detected: {}", + e.to_string() + ); } helm.upgrade(chart_info, &[]).map_err(to_command_error)?; @@ -653,8 +664,12 @@ pub fn get_chart_for_shell_agent( context: ShellAgentContext, chart_path: impl Fn(&str) -> String, ) -> Result { - let shell_agent_version: QoveryShellAgent = - get_qovery_app_version(QoveryAppName::ShellAgent, context.api_token, context.api_url, context.cluster_id)?; + let shell_agent_version: QoveryShellAgent = get_qovery_app_version( + QoveryAppName::ShellAgent, + context.api_token, + context.api_url, + context.cluster_id, + )?; let shell_agent = CommonChart { chart_info: ChartInfo { name: "shell-agent".to_string(), diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index f8068df8..7dfc441d 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -795,7 +795,11 @@ where 
EngineEvent::Deleting( event_details.clone(), EventMessage::new( - format!("Encountering issues while trying to get objects kind {}: {:?}", object, e.message()), + format!( + "Encountering issues while trying to get objects kind {}: {:?}", + object, + e.message() + ), None, ), ), @@ -804,8 +808,9 @@ where } // delete if resource exists - match retry::retry(Fibonacci::from_millis(5000).take(3), || { - match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { + match retry::retry( + Fibonacci::from_millis(5000).take(3), + || match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { Ok(_) => OperationResult::Ok(()), Err(e) => { logger.log( @@ -817,8 +822,8 @@ where ); OperationResult::Retry(e) } - } - }) { + }, + ) { Ok(_) => {} Err(Operation { error, .. }) => { return Err(EngineError::new_cannot_uninstall_helm_chart( @@ -861,7 +866,10 @@ where let masters_version = match VersionsNumber::from_str(raw_version.as_str()) { Ok(vn) => vn, Err(_) => { - return Err(EngineError::new_cannot_determine_k8s_master_version(event_details, raw_version.to_string())) + return Err(EngineError::new_cannot_determine_k8s_master_version( + event_details, + raw_version.to_string(), + )) } }; @@ -1057,12 +1065,14 @@ fn check_kubernetes_upgrade_status( } } Err(e) => { - return Err(EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details, - deployed_masters_version, - wished_version, - e, - )) + return Err( + EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( + event_details, + deployed_masters_version, + wished_version, + e, + ), + ) } }; @@ -1108,12 +1118,14 @@ fn check_kubernetes_upgrade_status( non_up_to_date_workers += 1; } Err(e) => { - return Err(EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details, - node, - wished_version, - e, - )) + return Err( + 
EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( + event_details, + node, + wished_version, + e, + ), + ) } } } @@ -1253,9 +1265,18 @@ where F: Fn() -> R, { let waiting_message = match action { - Action::Create => Some(format!("Infrastructure '{}' deployment is in progress...", kubernetes.name_with_id())), - Action::Pause => Some(format!("Infrastructure '{}' pause is in progress...", kubernetes.name_with_id())), - Action::Delete => Some(format!("Infrastructure '{}' deletion is in progress...", kubernetes.name_with_id())), + Action::Create => Some(format!( + "Infrastructure '{}' deployment is in progress...", + kubernetes.name_with_id() + )), + Action::Pause => Some(format!( + "Infrastructure '{}' pause is in progress...", + kubernetes.name_with_id() + )), + Action::Delete => Some(format!( + "Infrastructure '{}' deletion is in progress...", + kubernetes.name_with_id() + )), Action::Nothing => None, }; @@ -1393,7 +1414,10 @@ pub fn validate_k8s_required_cpu_and_burstable( context_id, )); - logger.log(LogLevel::Warning, EngineEvent::Warning(event_details, EventMessage::new_from_safe(message))); + logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details, EventMessage::new_from_safe(message)), + ); set_cpu_burst = total_cpu.clone(); } @@ -1414,7 +1438,10 @@ pub fn convert_k8s_cpu_value_to_f32(value: String) -> Result } Err(e) => Err(CommandError::new( e.to_string(), - Some(format!("Error while trying to parse `{}` to float 32.", value_number_string.as_str())), + Some(format!( + "Error while trying to parse `{}` to float 32.", + value_number_string.as_str() + )), )), }; } diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index 314b3d2f..d9a27611 100644 --- a/src/cloud_provider/qovery.rs +++ b/src/cloud_provider/qovery.rs @@ -52,15 +52,24 @@ pub fn get_qovery_app_version( QoveryAppName::ShellAgent => "shellAgent", }; - let url = format!("https://{}/api/v1/{}-version?type=cluster&clusterId={}", api_fqdn, 
app_type, cluster_id); + let url = format!( + "https://{}/api/v1/{}-version?type=cluster&clusterId={}", + api_fqdn, app_type, cluster_id + ); let message_safe = format!("Error while trying to get `{}` version.", app_type); match reqwest::blocking::Client::new().get(&url).headers(headers).send() { Ok(x) => match x.json::() { Ok(qa) => Ok(qa), - Err(e) => Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))), + Err(e) => Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )), }, - Err(e) => Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))), + Err(e) => Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )), } } diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index 0e552ce4..160094a0 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -66,7 +66,12 @@ impl MongoDbScw { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_mongodb_version(self.version()), + self, + event_details, + self.logger(), + ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 287b5162..6c7bc8a8 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -66,7 +66,12 @@ impl RedisScw { } fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version(get_self_hosted_redis_version(self.version()), self, event_details, self.logger()) + check_service_version( + get_self_hosted_redis_version(self.version()), + self, + event_details, + self.logger(), 
+ ) } fn cloud_provider_name(&self) -> &str { diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 68cf8da6..089322c6 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -116,7 +116,10 @@ pub fn scw_helm_charts( Ok(x) => x, Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe.to_string()), + )); } }; let chart_prefix = chart_prefix_path.unwrap_or("./"); @@ -126,7 +129,10 @@ pub fn scw_helm_charts( Ok(config) => config, Err(e) => { let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); - return Err(CommandError::new(format!("{}, error: {:?}", message_safe, e), Some(message_safe))); + return Err(CommandError::new( + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), + )); } }; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index a4679af5..9de13154 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -271,9 +271,9 @@ impl Kapsule { let error_cluster_id = "expected cluster id for this Scaleway cluster".to_string(); let cluster_id = match cluster_info.id { None => { - return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new_from_safe_message( - error_cluster_id, - ))) + return Err(ScwNodeGroupErrors::NodeGroupValidationError( + CommandError::new_from_safe_message(error_cluster_id), + )) } Some(x) => x, }; @@ -292,7 +292,10 @@ impl Kapsule { Err(e) => { let msg = format!("error while trying to get SCW pool 
info from cluster {}", &cluster_id); let msg_with_error = format!("{}. {:?}", msg, e); - return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new(msg_with_error, Some(msg)))); + return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( + msg_with_error, + Some(msg), + ))); } }; @@ -311,7 +314,10 @@ impl Kapsule { for ng in pools.pools.unwrap() { if ng.id.is_none() { let msg = format!("error while trying to validate SCW pool ID from cluster {}", &cluster_id); - return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new(msg.clone(), Some(msg)))); + return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new( + msg.clone(), + Some(msg), + ))); } let ng_sanitized = self.get_node_group_info(ng.id.unwrap().as_str())?; nodegroup_pool.push(ng_sanitized) @@ -439,8 +445,14 @@ impl Kapsule { context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); - context.insert("managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format); - context.insert("managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format); + context.insert( + "managed_dns_domains_root_terraform_format", + &managed_dns_domains_root_terraform_format, + ); + context.insert( + "managed_dns_resolvers_terraform_format", + &managed_dns_resolvers_terraform_format, + ); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); @@ -471,7 +483,10 @@ impl Kapsule { // Qovery features context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); - context.insert("metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory)); + context.insert( + 
"metrics_history_enabled", + &self.context.is_feature_enabled(&Features::MetricsHistory), + ); if self.context.resource_expiration_in_seconds().is_some() { context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } @@ -491,8 +506,10 @@ impl Kapsule { .secret_access_key .as_str(), ); - context - .insert("aws_region_tfstates_account", self.cloud_provider().terraform_state_credentials().region.as_str()); + context.insert( + "aws_region_tfstates_account", + self.cloud_provider().terraform_state_credentials().region.as_str(), + ); context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); @@ -768,7 +785,10 @@ impl Kapsule { Err(e) => { match e { ScwNodeGroupErrors::CloudProviderApiError(c) => { - return Err(EngineError::new_missing_api_info_from_cloud_provider_error(event_details, Some(c))) + return Err(EngineError::new_missing_api_info_from_cloud_provider_error( + event_details, + Some(c), + )) } ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log( LogLevel::Info, @@ -1028,9 +1048,10 @@ impl Kapsule { ); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self - .logger() - .log(LogLevel::Info, EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line))), + Ok(ok_line) => self.logger().log( + LogLevel::Info, + EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line)), + ), Err(err) => self.logger().log( LogLevel::Error, EngineEvent::Deploying( @@ -1229,8 +1250,10 @@ impl Kapsule { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Pausing(event_details, 
EventMessage::new_from_safe(message)), + ); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), @@ -1300,11 +1323,16 @@ impl Kapsule { // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process - let message = - format!("Ensuring everything is up to date before deleting cluster {}/{}", self.name(), self.id()); + let message = format!( + "Ensuring everything is up to date before deleting cluster {}/{}", + self.name(), + self.id() + ); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -1392,8 +1420,10 @@ impl Kapsule { } } Err(e) => { - let message_safe = - format!("Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(),); + let message_safe = format!( + "Error while getting all namespaces for Kubernetes cluster {}", + self.name_with_id(), + ); self.logger().log( LogLevel::Error, EngineEvent::Deleting( @@ -1555,8 +1585,10 @@ impl Kapsule { let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(LogLevel::Info, EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message))); + self.logger().log( + LogLevel::Info, + EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), + ); self.logger().log( LogLevel::Info, @@ -1586,9 +1618,10 @@ impl Kapsule { ); Ok(()) } - Err(Operation { error, .. }) => { - Err(EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, error)) - } + Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + error, + )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, CommandError::new(msg, None), @@ -1702,7 +1735,12 @@ impl Kubernetes for Kapsule { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); let listeners_helper = ListenersHelper::new(&self.listeners); self.send_to_customer( - format!("Start preparing Kapsule upgrade process {} cluster with id {}", self.name(), self.id()).as_str(), + format!( + "Start preparing Kapsule upgrade process {} cluster with id {}", + self.name(), + self.id() + ) + .as_str(), &listeners_helper, ); self.logger().log( @@ -1744,8 +1782,10 @@ impl Kubernetes for Kapsule { ), ); - context - .insert("kubernetes_cluster_version", format!("{}", &kubernetes_upgrade_status.requested_version).as_str()); + context.insert( + "kubernetes_cluster_version", + format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), + ); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1773,7 +1813,10 @@ impl Kubernetes for Kapsule { )); } - self.send_to_customer(format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper); + self.send_to_customer( + format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), + &listeners_helper, + ); self.logger().log( LogLevel::Info, EngineEvent::Deploying( diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 3d975b89..3332a852 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -418,9 +418,11 @@ where })?; // do exec helm upgrade and return the last deployment status - let helm = - helm::Helm::new(&kubernetes_config_file_path, &kubernetes.cloud_provider().credentials_environment_variables()) - .map_err(|e| helm::to_engine_error(&event_details, e))?; + let helm = 
helm::Helm::new( + &kubernetes_config_file_path, + &kubernetes.cloud_provider().credentials_environment_variables(), + ) + .map_err(|e| helm::to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_custom_namespace( helm_release_name, workspace_dir.clone(), @@ -926,7 +928,11 @@ where )) } Err(_err) => { - let message = format!("{} version {} is not supported!", service.service_type().name(), service.version(),); + let message = format!( + "{} version {} is not supported!", + service.service_type().name(), + service.version(), + ); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -988,7 +994,12 @@ pub fn check_kubernetes_service_error( where T: Service + ?Sized, { - let message = format!("{} {} {}", action_verb, service.service_type().name().to_lowercase(), service.name()); + let message = format!( + "{} {} {}", + action_verb, + service.service_type().name().to_lowercase(), + service.name() + ); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -1007,8 +1018,10 @@ where } CheckAction::Pause => { listeners_helper.pause_in_progress(progress_info); - logger - .log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message))); + logger.log( + LogLevel::Info, + EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), + ); } CheckAction::Delete => { listeners_helper.delete_in_progress(progress_info); @@ -1242,12 +1255,16 @@ where service.service_type().name(), service.name_with_id() )), - Action::Pause => { - Some(format!("{} '{}' pause is in progress...", service.service_type().name(), service.name_with_id())) - } - Action::Delete => { - Some(format!("{} '{}' deletion is in progress...", service.service_type().name(), service.name_with_id())) - } + Action::Pause => Some(format!( + "{} '{}' pause is in progress...", + service.service_type().name(), + service.name_with_id() + )), + Action::Delete => Some(format!( + "{} '{}' deletion is in progress...", + 
service.service_type().name(), + service.name_with_id() + )), Action::Nothing => None, }; @@ -1270,8 +1287,12 @@ where let logger = service.logger().clone_dyn(); let listeners = std::clone::Clone::clone(service.listeners()); - let progress_info = - ProgressInfo::new(service.progress_scope(), Info, waiting_message.clone(), service.context().execution_id()); + let progress_info = ProgressInfo::new( + service.progress_scope(), + Info, + waiting_message.clone(), + service.context().execution_id(), + ); let (tx, rx) = mpsc::channel(); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index a1ee5989..87e0cb2b 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -183,8 +183,10 @@ pub fn generate_supported_version( } else { for minor in minor_min..minor_max + 1 { // add short minor format targeting latest version - supported_versions - .insert(format!("{}.{}", major, minor), format!("{}.{}.{}", major, minor, update_max.unwrap())); + supported_versions.insert( + format!("{}.{}", major, minor), + format!("{}.{}.{}", major, minor, update_max.unwrap()), + ); if update_min.unwrap() == update_max.unwrap() { let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); @@ -238,7 +240,11 @@ impl VersionsNumber { } pub fn to_major_minor_version_string(&self, default_minor: &str) -> String { - let test = format!("{}.{}", self.major.clone(), self.minor.as_ref().unwrap_or(&default_minor.to_string())); + let test = format!( + "{}.{}", + self.major.clone(), + self.minor.as_ref().unwrap_or(&default_minor.to_string()) + ); test } @@ -374,7 +380,11 @@ pub fn check_cname_for( }; send_deployment_progress( - format!("Checking CNAME resolution of '{}'. Please wait, it can take some time...", cname_to_check).as_str(), + format!( + "Checking CNAME resolution of '{}'. 
Please wait, it can take some time...", + cname_to_check + ) + .as_str(), ); // Trying for 5 min to resolve CNAME @@ -385,12 +395,14 @@ pub fn check_cname_for( resolver }; let fixed_iterable = Fixed::from_millis(Duration::seconds(5).num_milliseconds() as u64).take(6 * 5); - let check_result = retry::retry(fixed_iterable, || match get_cname_record_value(next_resolver(), cname_to_check) { - Some(domain) => OperationResult::Ok(domain), - None => { - let msg = format!("Cannot find domain under CNAME {}. Retrying in 5 seconds...", cname_to_check); - send_deployment_progress(msg.as_str()); - OperationResult::Retry(msg) + let check_result = retry::retry(fixed_iterable, || { + match get_cname_record_value(next_resolver(), cname_to_check) { + Some(domain) => OperationResult::Ok(domain), + None => { + let msg = format!("Cannot find domain under CNAME {}. Retrying in 5 seconds...", cname_to_check); + send_deployment_progress(msg.as_str()); + OperationResult::Retry(msg) + } } }); @@ -422,7 +434,10 @@ pub fn check_domain_for( let resolvers = dns_resolvers(); for domain in domains_to_check { - let message = format!("Let's check domain resolution for '{}'. Please wait, it can take some time...", domain); + let message = format!( + "Let's check domain resolution for '{}'. 
Please wait, it can take some time...", + domain + ); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -537,8 +552,14 @@ pub fn print_action( ) { let msg = format!("{}.{}.{} called for {}", cloud_provider_name, struct_name, fn_name, item_name); match fn_name.contains("error") { - true => logger.log(LogLevel::Warning, EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg))), - false => logger.log(LogLevel::Info, EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))), + true => logger.log( + LogLevel::Warning, + EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg)), + ), + false => logger.log( + LogLevel::Info, + EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), + ), } } diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 5167d215..83cd74a8 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -116,7 +116,11 @@ impl QoveryCommand { } pub fn exec(&mut self) -> Result<(), CommandError> { - self.exec_with_abort(&mut |line| info!("{}", line), &mut |line| warn!("{}", line), &CommandKiller::never()) + self.exec_with_abort( + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), + &CommandKiller::never(), + ) } pub fn exec_with_output( @@ -273,7 +277,10 @@ impl QoveryCommand { } if !exit_status.success() { - debug!("command: {:?} terminated with error exist status {:?}", self.command, exit_status); + debug!( + "command: {:?} terminated with error exist status {:?}", + self.command, exit_status + ); return Err(ExitStatusError(exit_status)); } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index d523ac77..fa435cd6 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -81,8 +81,13 @@ impl Docker { // First check that the buildx plugin is correctly installed let args = vec!["buildx", "version"]; - let buildx_cmd_exist = - docker_exec(&args, &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); + let buildx_cmd_exist = 
docker_exec( + &args, + &docker.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); if buildx_cmd_exist.is_err() { return Err(DockerError::InvalidConfig( "Docker buildx plugin for buildkit is not correctly installed".to_string(), @@ -101,7 +106,13 @@ impl Docker { "--bootstrap", "--use", ]; - let _ = docker_exec(&args, &docker.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); + let _ = docker_exec( + &args, + &docker.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); Ok(docker) } @@ -188,7 +199,13 @@ impl Docker { { info!("Docker pull {:?}", image); - docker_exec(&["pull", &image.image_name()], &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort) + docker_exec( + &["pull", &image.image_name()], + &self.get_all_envs(&[]), + stdout_output, + stderr_output, + should_abort, + ) } pub fn build( @@ -406,7 +423,13 @@ impl Docker { let mut errored_commands = vec![]; for prune in all_prunes_commands { - let ret = docker_exec(&prune, &self.get_all_envs(&[]), &mut |_| {}, &mut |_| {}, &CommandKiller::never()); + let ret = docker_exec( + &prune, + &self.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); if let Err(e) = ret { errored_commands.push(e); } diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index 54b6024a..16d319c2 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -129,9 +129,12 @@ impl Helm { let mut stdout = String::new(); let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |line| stdout.push_str(&line), &mut |line| { - stderr.push_str(&line) - }) { + match helm_exec_with_output( + &args, + &self.get_all_envs(envs), + &mut |line| stdout.push_str(&line), + &mut |line| stderr.push_str(&line), + ) { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { stderr.push_str(&err.message()); diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs 
index f49b4616..369488f6 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -412,7 +412,9 @@ where P: AsRef, { if labels.is_empty() { - return Err(CommandError::new_from_safe_message("No labels were defined, can't set them".to_string())); + return Err(CommandError::new_from_safe_message( + "No labels were defined, can't set them".to_string(), + )); }; if !kubectl_exec_is_namespace_present(kubernetes_config.as_ref(), namespace, envs.clone()) { @@ -436,8 +438,9 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); _envs.extend(envs.clone()); - let _ = - kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| error!("{}", line))?; + let _ = kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + })?; Ok(()) } @@ -545,10 +548,12 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); _envs.extend(envs); - let _ = - kubectl_exec_with_output(vec!["delete", "crd", crd_name], _envs, &mut |line| info!("{}", line), &mut |line| { - error!("{}", line) - })?; + let _ = kubectl_exec_with_output( + vec!["delete", "crd", crd_name], + _envs, + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), + )?; Ok(()) } @@ -668,7 +673,9 @@ where environment_variables.push(("KUBECONFIG", kubernetes_config.as_ref().to_str().unwrap())); let args = vec!["-n", namespace, "rollout", "restart", "deployment", name]; - kubectl_exec_with_output(args, environment_variables, &mut |line| info!("{}", line), &mut |line| error!("{}", line)) + kubectl_exec_with_output(args, environment_variables, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + }) } pub fn kubectl_exec_get_node

( @@ -767,7 +774,11 @@ pub fn kubectl_exec_get_configmap

( where P: AsRef, { - kubectl_exec::(vec!["get", "configmap", "-o", "json", "-n", namespace, name], kubernetes_config, envs) + kubectl_exec::( + vec!["get", "configmap", "-o", "json", "-n", namespace, name], + kubernetes_config, + envs, + ) } pub fn kubectl_exec_get_json_events

( @@ -857,7 +868,10 @@ where P: AsRef, { let pods = specific_pod_name.unwrap_or("*"); - let api_url = format!("/apis/custom.metrics.k8s.io/v1beta1/namespaces/{}/pods/{}/{}", namespace, pods, metric_name); + let api_url = format!( + "/apis/custom.metrics.k8s.io/v1beta1/namespaces/{}/pods/{}/{}", + namespace, pods, metric_name + ); kubectl_exec::(vec!["get", "--raw", api_url.as_str()], kubernetes_config, envs) } diff --git a/src/cmd/structs.rs b/src/cmd/structs.rs index 54c7ff67..ea39b03f 100644 --- a/src/cmd/structs.rs +++ b/src/cmd/structs.rs @@ -1311,7 +1311,10 @@ mod tests { assert_eq!(pod_status.is_ok(), true); let pod_status = pod_status.unwrap(); assert_eq!(pod_status.items[0].status.conditions[0].status, "False"); - assert_eq!(pod_status.items[0].status.conditions[0].reason, KubernetesPodStatusReason::CrashLoopBackOff); + assert_eq!( + pod_status.items[0].status.conditions[0].reason, + KubernetesPodStatusReason::CrashLoopBackOff + ); let payload = r#"{ "apiVersion": "v1", @@ -1584,7 +1587,10 @@ mod tests { let pod_status = serde_json::from_str::>(payload); assert!(pod_status.is_ok()); - assert_eq!(pod_status.unwrap().items[0].status.conditions[0].reason, KubernetesPodStatusReason::Unknown(None)); + assert_eq!( + pod_status.unwrap().items[0].status.conditions[0].reason, + KubernetesPodStatusReason::Unknown(None) + ); } #[test] diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index c9af2b30..8662d99e 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -27,7 +27,10 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re Ok(_) => Ok(()), Err(e) => Err(CommandError::new( format!("Wasn't able to delete terraform lock file {}", &terraform_provider_lock), - Some(format!("Wasn't able to delete terraform lock file {}, error: {:?}", &terraform_provider_lock, e)), + Some(format!( + "Wasn't able to delete terraform lock file {}, error: {:?}", + &terraform_provider_lock, e + )), )), }; } else if err.message().contains("Plugin 
reinitialization required") { @@ -35,7 +38,9 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re return Ok(()); } - Err(CommandError::new_from_safe_message("Not known method to fix this Terraform issue".to_string())) + Err(CommandError::new_from_safe_message( + "Not known method to fix this Terraform issue".to_string(), + )) } fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> { diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 4eb68f5e..e369495a 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -148,8 +148,11 @@ impl DOCR { } pub fn exec_docr_login(&self) -> Result<(), ContainerRegistryError> { - let mut cmd = - QoveryCommand::new("doctl", &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], &[]); + let mut cmd = QoveryCommand::new( + "doctl", + &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], + &[], + ); match cmd.exec() { Ok(_) => Ok(()), diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 5e257c11..134c7b7a 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -73,7 +73,9 @@ impl DnsProvider for Cloudflare { fn is_valid(&self) -> Result<(), EngineError> { if self.cloudflare_api_token.is_empty() || self.cloudflare_email.is_empty() { - Err(EngineError::new_client_invalid_cloud_provider_credentials(self.get_event_details())) + Err(EngineError::new_client_invalid_cloud_provider_credentials( + self.get_event_details(), + )) } else { Ok(()) } diff --git a/src/error.rs b/src/error.rs index 00fd95a5..0cc266dd 100644 --- a/src/error.rs +++ b/src/error.rs @@ -111,7 +111,11 @@ pub fn cast_simple_error_to_engine_error>( Err(simple_error) => { let message = match simple_error.kind { SimpleErrorKind::Command(exit_status) => { - format!("{} ({})", simple_error.message.unwrap_or_else(|| "".into()), exit_status) + format!( + "{} ({})", + 
simple_error.message.unwrap_or_else(|| "".into()), + exit_status + ) } SimpleErrorKind::Other => simple_error.message.unwrap_or_else(|| "".into()), }; diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 2871e4e8..9cc9c1eb 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -469,7 +469,15 @@ impl EngineError { link: Option, hint_message: Option, ) -> EngineError { - EngineError::new(event_details, Tag::Unknown, qovery_log_message, user_log_message, message, link, hint_message) + EngineError::new( + event_details, + Tag::Unknown, + qovery_log_message, + user_log_message, + message, + link, + hint_message, + ) } /// Creates new error for missing required env variable. @@ -482,7 +490,15 @@ impl EngineError { /// * `variable_name`: Variable name which is not set. pub fn new_missing_required_env_variable(event_details: EventDetails, variable_name: String) -> EngineError { let message = format!("`{}` environment variable wasn't found.", variable_name); - EngineError::new(event_details, Tag::MissingRequiredEnvVariable, message.to_string(), message, None, None, None) + EngineError::new( + event_details, + Tag::MissingRequiredEnvVariable, + message.to_string(), + message, + None, + None, + None, + ) } /// Creates new error for cluster has no worker nodes. @@ -1175,7 +1191,15 @@ impl EngineError { selector, namespace ); - EngineError::new(event_details, Tag::K8sGetLogs, message.to_string(), message, Some(raw_error), None, None) + EngineError::new( + event_details, + Tag::K8sGetLogs, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) } /// Creates new error for kubernetes get events. 
@@ -1192,7 +1216,15 @@ impl EngineError { ) -> EngineError { let message = format!("Error, unable to retrieve events in namespace `{}`.", namespace); - EngineError::new(event_details, Tag::K8sGetLogs, message.to_string(), message, Some(raw_error), None, None) + EngineError::new( + event_details, + Tag::K8sGetLogs, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) } /// Creates new error for kubernetes describe. @@ -1209,10 +1241,20 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = - format!("Error, unable to describe pod with selector `{}` in namespace `{}`.", selector, namespace); + let message = format!( + "Error, unable to describe pod with selector `{}` in namespace `{}`.", + selector, namespace + ); - EngineError::new(event_details, Tag::K8sDescribe, message.to_string(), message, Some(raw_error), None, None) + EngineError::new( + event_details, + Tag::K8sDescribe, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) } /// Creates new error for kubernetes history. @@ -1225,7 +1267,15 @@ impl EngineError { pub fn new_k8s_history(event_details: EventDetails, namespace: String, raw_error: CommandError) -> EngineError { let message = format!("Error, unable to get history in namespace `{}`.", namespace); - EngineError::new(event_details, Tag::K8sHistory, message.to_string(), message, Some(raw_error), None, None) + EngineError::new( + event_details, + Tag::K8sHistory, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) } /// Creates new error for kubernetes namespace creation issue. 
@@ -1267,7 +1317,10 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = format!("Error, pod with selector `{}` in namespace `{}` is not ready.", selector, namespace); + let message = format!( + "Error, pod with selector `{}` in namespace `{}` is not ready.", + selector, namespace + ); EngineError::new( event_details, @@ -1364,7 +1417,15 @@ impl EngineError { pub fn new_missing_required_binary(event_details: EventDetails, missing_binary_name: String) -> EngineError { let message = format!("`{}` binary is required but was not found.", missing_binary_name); - EngineError::new(event_details, Tag::CannotFindRequiredBinary, message.to_string(), message, None, None, None) + EngineError::new( + event_details, + Tag::CannotFindRequiredBinary, + message.to_string(), + message, + None, + None, + None, + ) } /// Creates new error for subnets count not being even. Subnets count should be even to get the same number as private and public. @@ -1384,7 +1445,15 @@ impl EngineError { zone_name, subnets_count, ); - EngineError::new(event_details, Tag::SubnetsCountShouldBeEven, message.to_string(), message, None, None, None) + EngineError::new( + event_details, + Tag::SubnetsCountShouldBeEven, + message.to_string(), + message, + None, + None, + None, + ) } /// Creates new error for IAM role which cannot be retrieved or created. @@ -1568,8 +1637,10 @@ impl EngineError { parameter_value: String, raw_error: Option, ) -> EngineError { - let message = - format!("{} value `{}` not supported for parameter `{}`", service_type, parameter_value, parameter_name,); + let message = format!( + "{} value `{}` not supported for parameter `{}`", + service_type, parameter_value, parameter_name, + ); EngineError::new( event_details, @@ -1667,7 +1738,15 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `error`: Raw error message. 
pub fn new_build_error(event_details: EventDetails, error: BuildError) -> EngineError { - EngineError::new(event_details, Tag::BuilderError, error.to_string(), error.to_string(), None, None, None) + EngineError::new( + event_details, + Tag::BuilderError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) } /// Creates new error from an Container Registry error @@ -1732,8 +1811,10 @@ impl EngineError { namespace: String, raw_error: CommandError, ) -> EngineError { - let message = - format!("Error while trying to get helm chart `{}` history in namespace `{}`.", helm_chart, namespace); + let message = format!( + "Error while trying to get helm chart `{}` history in namespace `{}`.", + helm_chart, namespace + ); EngineError::new( event_details, @@ -1805,7 +1886,15 @@ impl EngineError { ) -> EngineError { let message = format!("Error, version `{}` is not supported for `{}`.", version, product_name); - EngineError::new(event_details, Tag::UnsupportedVersion, message.to_string(), message, None, None, None) + EngineError::new( + event_details, + Tag::UnsupportedVersion, + message.to_string(), + message, + None, + None, + None, + ) } /// Creates new error while trying to get cluster. @@ -1868,7 +1957,10 @@ impl EngineError { service_name: String, ) -> EngineError { // TODO(benjaminch): Service should probably passed otherwise, either inside event_details or via a new dedicated struct. 
- let message = format!("Service `{}` (id `{}`) failed to deploy (before start).", service_name, service_id); + let message = format!( + "Service `{}` (id `{}`) failed to deploy (before start).", + service_name, service_id + ); EngineError::new( event_details, @@ -1895,8 +1987,10 @@ impl EngineError { service_type: String, raw_error: Option, ) -> EngineError { - let message = - format!("Database `{}` (id `{}`) failed to start after several retries.", service_type, service_id); + let message = format!( + "Database `{}` (id `{}`) failed to start after several retries.", + service_type, service_id + ); EngineError::new( event_details, @@ -2203,7 +2297,15 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `error`: Raw error message. pub fn new_docker_error(event_details: EventDetails, error: DockerError) -> EngineError { - EngineError::new(event_details, Tag::DockerError, error.to_string(), error.to_string(), None, None, None) + EngineError::new( + event_details, + Tag::DockerError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) } /// Creates new error when trying to push a Docker image. 
@@ -2220,8 +2322,10 @@ impl EngineError { repository_url: String, raw_error: CommandError, ) -> EngineError { - let message = - format!("Error, trying to push Docker image `{}` to repository `{}`.", image_name, repository_url); + let message = format!( + "Error, trying to push Docker image `{}` to repository `{}`.", + image_name, repository_url + ); EngineError::new( event_details, @@ -2248,8 +2352,10 @@ impl EngineError { repository_url: String, raw_error: CommandError, ) -> EngineError { - let message = - format!("Error, trying to pull Docker image `{}` from repository `{}`.", image_name, repository_url); + let message = format!( + "Error, trying to pull Docker image `{}` from repository `{}`.", + image_name, repository_url + ); EngineError::new( event_details, @@ -2399,8 +2505,10 @@ impl EngineError { event_details: EventDetails, repository_name: String, ) -> EngineError { - let message = - format!("Failed to retrieve credentials and endpoint URL from container registry `{}`.", repository_name,); + let message = format!( + "Failed to retrieve credentials and endpoint URL from container registry `{}`.", + repository_name, + ); EngineError::new( event_details, @@ -2617,7 +2725,10 @@ impl EngineError { file_name: String, raw_error: ObjectStorageError, ) -> EngineError { - let message = format!("Error, cannot put file `{}` into object storage bucket `{}`.", file_name, bucket_name,); + let message = format!( + "Error, cannot put file `{}` into object storage bucket `{}`.", + file_name, bucket_name, + ); EngineError::new( event_details, @@ -2692,8 +2803,10 @@ impl EngineError { bucket_name: String, raw_error: CommandError, ) -> EngineError { - let message = - format!("Error while trying to activate versioning for object storage bucket `{}`.", bucket_name,); + let message = format!( + "Error while trying to activate versioning for object storage bucket `{}`.", + bucket_name, + ); EngineError::new( event_details, diff --git a/src/events/mod.rs b/src/events/mod.rs index 
b8c9e2ab..b043939e 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -458,7 +458,12 @@ mod tests { fn test_event_message() { // setup: let test_cases: Vec<(String, Option, EventMessageVerbosity, String)> = vec![ - ("safe".to_string(), Some("raw".to_string()), EventMessageVerbosity::SafeOnly, "safe".to_string()), + ( + "safe".to_string(), + Some("raw".to_string()), + EventMessageVerbosity::SafeOnly, + "safe".to_string(), + ), ("safe".to_string(), None, EventMessageVerbosity::SafeOnly, "safe".to_string()), ("safe".to_string(), None, EventMessageVerbosity::FullDetails, "safe".to_string()), ( @@ -483,11 +488,26 @@ mod tests { fn test_stage_sub_step_name() { // setup: let test_cases: Vec<(Stage, String)> = vec![ - (Stage::Infrastructure(InfrastructureStep::Create), InfrastructureStep::Create.to_string()), - (Stage::Infrastructure(InfrastructureStep::Upgrade), InfrastructureStep::Upgrade.to_string()), - (Stage::Infrastructure(InfrastructureStep::Delete), InfrastructureStep::Delete.to_string()), - (Stage::Infrastructure(InfrastructureStep::Resume), InfrastructureStep::Resume.to_string()), - (Stage::Infrastructure(InfrastructureStep::Pause), InfrastructureStep::Pause.to_string()), + ( + Stage::Infrastructure(InfrastructureStep::Create), + InfrastructureStep::Create.to_string(), + ), + ( + Stage::Infrastructure(InfrastructureStep::Upgrade), + InfrastructureStep::Upgrade.to_string(), + ), + ( + Stage::Infrastructure(InfrastructureStep::Delete), + InfrastructureStep::Delete.to_string(), + ), + ( + Stage::Infrastructure(InfrastructureStep::Resume), + InfrastructureStep::Resume.to_string(), + ), + ( + Stage::Infrastructure(InfrastructureStep::Pause), + InfrastructureStep::Pause.to_string(), + ), ( Stage::Infrastructure(InfrastructureStep::LoadConfiguration), InfrastructureStep::LoadConfiguration.to_string(), diff --git a/src/fs.rs b/src/fs.rs index e5364cca..7a68a48f 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -113,7 +113,11 @@ pub fn 
cleanup_workspace_directory(working_root_dir: &str, execution_id: &str) - Err(err) => { error!( "{}", - format!("error trying to remove workspace directory '{}', error: {}", workspace_dir.as_str(), err) + format!( + "error trying to remove workspace directory '{}', error: {}", + workspace_dir.as_str(), + err + ) ); Err(err) } diff --git a/src/git.rs b/src/git.rs index 8831df13..44a50978 100644 --- a/src/git.rs +++ b/src/git.rs @@ -49,7 +49,10 @@ fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, .find_remote("origin") .map(|remote| remote.url().unwrap_or_default().to_string()) .unwrap_or_default(); - let msg = format!("Unable to use git object commit ID {} on repository {}: {}", &commit_id, &repo_url, &err); + let msg = format!( + "Unable to use git object commit ID {} on repository {}: {}", + &commit_id, &repo_url, &err + ); Error::from_str(&msg) })?; @@ -188,11 +191,19 @@ mod tests { let repo_path = repo_dir.path(); // We only allow https:// at the moment - let repo = clone(&Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), &repo_path, &|_| vec![]); + let repo = clone( + &Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), + &repo_path, + &|_| vec![], + ); assert!(matches!(repo, Err(e) if e.message().contains("https://"))); // Repository must be empty - let repo = clone(&Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), &repo_path, &|_| vec![]); + let repo = clone( + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), + &repo_path, + &|_| vec![], + ); assert!(repo.is_ok()); // clone makes sure to empty the directory // Working case @@ -210,10 +221,16 @@ mod tests { { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_clone".to_string()); let get_credentials = |_: &str| { - vec![(CredentialType::USER_PASS_PLAINTEXT, Cred::userpass_plaintext("FAKE", "FAKE").unwrap())] + vec![( + CredentialType::USER_PASS_PLAINTEXT, + Cred::userpass_plaintext("FAKE", 
"FAKE").unwrap(), + )] }; - let repo = - clone(&Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), clone_dir.path(), &get_credentials); + let repo = clone( + &Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), + clone_dir.path(), + &get_credentials, + ); assert!(matches!(repo, Err(repo) if repo.message().contains("authentication"))); } @@ -246,9 +263,12 @@ mod tests { #[test] fn test_git_checkout() { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_checkout".to_string()); - let repo = - clone(&Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![]) - .unwrap(); + let repo = clone( + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), + clone_dir.path(), + &|_| vec![], + ) + .unwrap(); // Invalid commit for this repository let check = checkout(&repo, "c2c2101f8e4c4ffadb326dc440ba8afb4aeb1310"); @@ -305,7 +325,10 @@ mod tests { CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &invalid_ssh_key, Some("toto")).unwrap(), ), - (CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &ssh_key, None).unwrap()), + ( + CredentialType::SSH_MEMORY, + Cred::ssh_key_from_memory(user, None, &ssh_key, None).unwrap(), + ), ( CredentialType::SSH_MEMORY, Cred::ssh_key_from_memory(user, None, &invalid_ssh_key, Some("toto")).unwrap(), diff --git a/src/logger.rs b/src/logger.rs index e99ea075..2d96b8d3 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -129,7 +129,10 @@ mod tests { ), qovery_message.to_string(), user_message.to_string(), - Some(errors::CommandError::new(safe_message.to_string(), Some(raw_message.to_string()))), + Some(errors::CommandError::new( + safe_message.to_string(), + Some(raw_message.to_string()), + )), Some(link), Some(hint.to_string()), ), @@ -205,9 +208,21 @@ mod tests { tc.description ); - assert!(logs_contain(format!("organization_id=\"{}\"", orga_id.short()).as_str()), "{}", tc.description); - 
assert!(logs_contain(format!("cluster_id=\"{}\"", cluster_id.short()).as_str()), "{}", tc.description); - assert!(logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), "{}", tc.description); + assert!( + logs_contain(format!("organization_id=\"{}\"", orga_id.short()).as_str()), + "{}", + tc.description + ); + assert!( + logs_contain(format!("cluster_id=\"{}\"", cluster_id.short()).as_str()), + "{}", + tc.description + ); + assert!( + logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), + "{}", + tc.description + ); let details = tc.event.get_details(); assert!( @@ -240,13 +255,21 @@ mod tests { tc.description ); - assert!(logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), "{}", tc.description); + assert!( + logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), + "{}", + tc.description + ); assert!( logs_contain(format!("step=\"{}\"", details.stage().sub_step_name()).as_str()), "{}", tc.description ); - assert!(logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), "{}", tc.description); + assert!( + logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), + "{}", + tc.description + ); // Logger should display everything assert!(logs_contain(safe_message), "{}", tc.description); diff --git a/src/models.rs b/src/models.rs index 109c410d..c2126e5d 100644 --- a/src/models.rs +++ b/src/models.rs @@ -1426,8 +1426,16 @@ mod tests { let result = QoveryIdentifier::new_from_long_id(tc.input.clone()); // verify: - assert_eq!(tc.expected_long_id_output, result.raw_long_id, "case {} : '{}'", tc.description, tc.input); - assert_eq!(tc.expected_short_output, result.short, "case {} : '{}'", tc.description, tc.input); + assert_eq!( + tc.expected_long_id_output, result.raw_long_id, + "case {} : '{}'", + tc.description, tc.input + ); + assert_eq!( + tc.expected_short_output, result.short, + "case {} : '{}'", + tc.description, tc.input + ); } } } diff --git a/src/object_storage/s3.rs 
b/src/object_storage/s3.rs index 5d4fbbcb..886d962f 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -60,8 +60,10 @@ impl S3 { fn get_s3_client(&self) -> S3Client { let region = RusotoRegion::from_str(&self.region.to_aws_format()) .unwrap_or_else(|_| panic!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format())); - let client = - Client::new_with(self.get_credentials(), HttpClient::new().expect("unable to create new Http client")); + let client = Client::new_with( + self.get_credentials(), + HttpClient::new().expect("unable to create new Http client"), + ); S3Client::new_with_client(client, region) } diff --git a/src/template.rs b/src/template.rs index 1cf101fd..670da04c 100644 --- a/src/template.rs +++ b/src/template.rs @@ -23,7 +23,10 @@ where tera::ErrorKind::TemplateNotFound(x) => format!("template not found: {}", x), tera::ErrorKind::Msg(x) => format!("tera error: {}", x), tera::ErrorKind::CircularExtend { tpl, inheritance_chain } => { - format!("circular extend - template: {}, inheritance chain: {:?}", tpl, inheritance_chain) + format!( + "circular extend - template: {}, inheritance chain: {:?}", + tpl, inheritance_chain + ) } tera::ErrorKind::MissingParent { current, parent } => { format!("missing parent - current: {}, parent: {}", current, parent) diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index d2eae14f..41f59b17 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -93,7 +93,14 @@ impl Cluster for AWS { vpc_network_mode, ); - EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, k) + EngineConfig::new( + context.clone(), + build_platform, + container_registry, + cloud_provider, + dns_provider, + k, + ) } fn cloud_provider(context: &Context) -> Box { diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 8339566b..15eb7f30 100644 --- a/test_utilities/src/digitalocean.rs +++ 
b/test_utilities/src/digitalocean.rs @@ -76,7 +76,14 @@ impl Cluster for DO { vpc_network_mode, ); - EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, k) + EngineConfig::new( + context.clone(), + build_platform, + container_registry, + cloud_provider, + dns_provider, + k, + ) } fn cloud_provider(context: &Context) -> Box { diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 31da6441..1941a90b 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -104,7 +104,14 @@ impl Cluster for Scaleway { vpc_network_mode, ); - EngineConfig::new(context.clone(), build_platform, container_registry, cloud_provider, dns_provider, cluster) + EngineConfig::new( + context.clone(), + build_platform, + container_registry, + cloud_provider, + dns_provider, + cluster, + ) } fn cloud_provider(context: &Context) -> Box { @@ -222,8 +229,14 @@ pub fn clean_environments( let secret_token = secrets.SCALEWAY_SECRET_KEY.unwrap(); let project_id = secrets.SCALEWAY_DEFAULT_PROJECT_ID.unwrap(); - let container_registry_client = - ScalewayCR::new(context.clone(), "test", "test", secret_token.as_str(), project_id.as_str(), zone)?; + let container_registry_client = ScalewayCR::new( + context.clone(), + "test", + "test", + secret_token.as_str(), + project_id.as_str(), + zone, + )?; // delete images created in registry let registry_url = container_registry_client.registry_info(); diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 42b6d3ac..58bf34f4 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -169,13 +169,21 @@ impl FuncTestsSecrets { fn get_vault_config() -> Result { let vault_addr = match env::var_os("VAULT_ADDR") { Some(x) => x.into_string().unwrap(), - None => return Err(Error::new(ErrorKind::NotFound, format!("VAULT_ADDR environment variable is missing"))), + None => { + return Err(Error::new( + ErrorKind::NotFound, 
+ format!("VAULT_ADDR environment variable is missing"), + )) + } }; let vault_token = match env::var_os("VAULT_TOKEN") { Some(x) => x.into_string().unwrap(), None => { - return Err(Error::new(ErrorKind::NotFound, format!("VAULT_TOKEN environment variable is missing"))) + return Err(Error::new( + ErrorKind::NotFound, + format!("VAULT_TOKEN environment variable is missing"), + )) } }; @@ -370,7 +378,10 @@ pub fn init() -> Instant { None => tracing_subscriber::fmt().try_init(), }; - info!("running from current directory: {}", std::env::current_dir().unwrap().to_str().unwrap()); + info!( + "running from current directory: {}", + std::env::current_dir().unwrap().to_str().unwrap() + ); Instant::now() } @@ -701,7 +712,10 @@ fn get_cloud_provider_credentials(provider_kind: Kind, secrets: &FuncTestsSecret Kind::Scw => vec![ (SCALEWAY_ACCESS_KEY, secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str()), (SCALEWAY_SECRET_KEY, secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str()), - (SCALEWAY_DEFAULT_PROJECT_ID, secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str()), + ( + SCALEWAY_DEFAULT_PROJECT_ID, + secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str(), + ), ], } } diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 37fb5292..5e7a8d51 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -330,7 +330,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); - assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + ret, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); test_name.to_string() }) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 40fe92ce..67189f37 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -117,7 +117,13 @@ fn 
deploy_a_working_environment_and_pause_it_eks() { let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Aws, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -125,7 +131,13 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods(context.clone(), Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Aws, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -268,7 +280,10 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); - assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + ret, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); test_name.to_string() }) @@ -626,8 +641,13 @@ fn redeploy_same_app_with_ebs() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = - is_pod_restarted_env(context.clone(), Kind::Aws, environment_check1, app_name.as_str(), secrets.clone()); + let (_, number) = is_pod_restarted_env( + context.clone(), + Kind::Aws, + environment_check1, + app_name.as_str(), + secrets.clone(), + ); let ret = environment_redeploy.deploy_environment(&ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); @@ 
-796,12 +816,18 @@ fn deploy_ok_fail_fail_ok_environment() { // FAIL and rollback let ret = not_working_env_1.deploy_environment(&ea_not_working_1, logger.clone(), &engine_config_for_not_working_1); - assert!(matches!(ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + ret, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // FAIL and Rollback again let ret = not_working_env_2.deploy_environment(&ea_not_working_2, logger.clone(), &engine_config_for_not_working_2); - assert!(matches!(ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + ret, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // Should be working let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index 4e0d67bc..53f790b8 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -25,7 +25,10 @@ fn create_and_destroy_eks_cluster( cluster_test( test_name, Kind::Aws, - context(generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str()), + context( + generate_id().as_str(), + generate_cluster_id(region.to_string().as_str()).as_str(), + ), logger(), region.to_aws_format().as_str(), Some(zones), diff --git a/tests/aws/aws_s3.rs b/tests/aws/aws_s3.rs index fb0b699f..33ac7aa7 100644 --- a/tests/aws/aws_s3.rs +++ b/tests/aws/aws_s3.rs @@ -172,7 +172,11 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = aws_os.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); + let result = aws_os.put( + bucket_name.as_str(), + object_key.as_str(), + temp_file.into_temp_path().to_str().unwrap(), + ); // validate: assert!(result.is_ok()); diff --git a/tests/digitalocean/do_databases.rs 
b/tests/digitalocean/do_databases.rs index 0341671c..fcd475b7 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -131,7 +131,13 @@ fn deploy_an_environment_with_db_and_pause_it() { // Check that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods(context.clone(), ProviderKind::Do, environment.clone(), app_name.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + ProviderKind::Do, + environment.clone(), + app_name.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -362,7 +368,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); - assert!(matches!(ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + ret, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); // delete images created during test from registries if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 28eee4e5..b3df0295 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -125,7 +125,10 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let result = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); - assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); @@ -176,7 +179,13 @@ fn 
digitalocean_doks_deploy_a_working_environment_and_pause() { let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Do, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -184,7 +193,13 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Do, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -194,7 +209,13 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Do, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Do, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -497,14 +518,24 @@ fn digitalocean_doks_redeploy_same_app() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = - is_pod_restarted_env(context.clone(), Kind::Do, environment_check1, app_name.as_str(), secrets.clone()); + let (_, number) = is_pod_restarted_env( + context.clone(), + Kind::Do, + environment_check1, + app_name.as_str(), + secrets.clone(), + ); let result = 
environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); - let (_, number2) = - is_pod_restarted_env(context.clone(), Kind::Do, environment_check2, app_name.as_str(), secrets.clone()); + let (_, number2) = is_pod_restarted_env( + context.clone(), + Kind::Do, + environment_check2, + app_name.as_str(), + secrets.clone(), + ); // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); @@ -677,7 +708,10 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_1, ); - assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // FAIL and Rollback again let result = not_working_env_2.deploy_environment( @@ -685,7 +719,10 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_2, ); - assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // Should be working let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index 54f853b1..e214ede1 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -170,7 +170,11 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = spaces.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); + let result = spaces.put( + bucket_name.as_str(), + object_key.as_str(), + temp_file.into_temp_path().to_str().unwrap(), + ); // validate: 
assert!(result.is_ok()); diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 5bed5672..489d7e4b 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -137,7 +137,13 @@ fn deploy_an_environment_with_db_and_pause_it() { // Check that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods(context.clone(), ProviderKind::Scw, environment.clone(), app_name.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + ProviderKind::Scw, + environment.clone(), + app_name.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -367,7 +373,10 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { } let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); - assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); // delete images created during test from registries if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 791bc5d7..68114006 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -129,7 +129,10 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let result = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); - assert!(matches!(result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean 
environments, error: {:?}", e); @@ -182,7 +185,13 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Scw, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -190,7 +199,13 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Scw, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -200,7 +215,13 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Scw, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -475,7 +496,13 @@ fn deploy_a_working_environment_and_pause_it() { let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Scw, + environment.clone(), + 
selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -483,7 +510,13 @@ fn deploy_a_working_environment_and_pause_it() { assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app - let ret = get_pods(context.clone(), Kind::Scw, environment.clone(), selector.as_str(), secrets.clone()); + let ret = get_pods( + context.clone(), + Kind::Scw, + environment.clone(), + selector.as_str(), + secrets.clone(), + ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); @@ -583,14 +616,24 @@ fn scaleway_kapsule_redeploy_same_app() { }; let app_name = format!("{}-0", &environment_check1.applications[0].name); - let (_, number) = - is_pod_restarted_env(context.clone(), Kind::Scw, environment_check1, app_name.as_str(), secrets.clone()); + let (_, number) = is_pod_restarted_env( + context.clone(), + Kind::Scw, + environment_check1, + app_name.as_str(), + secrets.clone(), + ); let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); - let (_, number2) = - is_pod_restarted_env(context.clone(), Kind::Scw, environment_check2, app_name.as_str(), secrets.clone()); + let (_, number2) = is_pod_restarted_env( + context.clone(), + Kind::Scw, + environment_check2, + app_name.as_str(), + secrets.clone(), + ); // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); @@ -770,7 +813,10 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_1, ); - assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // FAIL and Rollback again let result = not_working_env_2.deploy_environment( @@ -778,7 +824,10 @@ 
fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { logger.clone(), &engine_config_for_not_working_2, ); - assert!(matches!(result, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _))); + assert!(matches!( + result, + TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) + )); // Should be working let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); diff --git a/tests/scaleway/scw_object_storage.rs b/tests/scaleway/scw_object_storage.rs index 632e0aaa..35d232af 100644 --- a/tests/scaleway/scw_object_storage.rs +++ b/tests/scaleway/scw_object_storage.rs @@ -191,8 +191,11 @@ fn test_put_file() { let temp_file = NamedTempFile::new().expect("error while creating tempfile"); // compute: - let result = - scaleway_os.put(bucket_name.as_str(), object_key.as_str(), temp_file.into_temp_path().to_str().unwrap()); + let result = scaleway_os.put( + bucket_name.as_str(), + object_key.as_str(), + temp_file.into_temp_path().to_str().unwrap(), + ); // validate: assert!(result.is_ok()); From 08a6833b1c866dda90c7de75f177537fbdbb983c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 25 Mar 2022 16:28:36 +0100 Subject: [PATCH 71/85] Add test for build phase + fix for DO registry (#666) --- src/build_platform/mod.rs | 6 ++++ src/container_registry/docr.rs | 2 +- src/transaction.rs | 12 ++++++- test_utilities/src/common.rs | 37 +++++++++++++++++++ tests/aws/aws_environment.rs | 53 ++++++++++++++++++++++++++-- tests/digitalocean/do_environment.rs | 48 +++++++++++++++++++++++++ tests/lib.rs | 2 +- tests/scaleway/scw_environment.rs | 50 ++++++++++++++++++++++++++ tests/unit/mod.rs | 1 - 9 files changed, 204 insertions(+), 7 deletions(-) delete mode 100644 tests/unit/mod.rs diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index be1ce938..5804afa6 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -153,6 +153,12 @@ impl 
Image { pub fn name(&self) -> String { self.name.clone() } + + pub fn name_without_repository(&self) -> &str { + self.name + .strip_prefix(&format!("{}/", self.repository_name())) + .unwrap_or(&self.name) + } } impl Default for Image { diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index e369495a..922bda17 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -201,7 +201,7 @@ impl ContainerRegistry for DOCR { let url = format!( "https://api.digitalocean.com/v2/registry/{}/repositories/{}/tags", image.registry_name, - image.name() + image.name_without_repository() ); let res = reqwest::blocking::Client::new() diff --git a/src/transaction.rs b/src/transaction.rs index f29f9b82..46696e28 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -93,13 +93,23 @@ impl<'a> Transaction<'a> { ) } + pub fn build_environment( + &mut self, + environment: &Rc>, + option: DeploymentOption, + ) -> Result<(), EnvironmentError> { + self.steps.push(Step::BuildEnvironment(environment.clone(), option)); + + Ok(()) + } + pub fn deploy_environment_with_options( &mut self, environment: &Rc>, option: DeploymentOption, ) -> Result<(), EnvironmentError> { // add build step - self.steps.push(Step::BuildEnvironment(environment.clone(), option)); + self.build_environment(environment, option)?; // add deployment step self.steps.push(Step::DeployEnvironment(environment.clone())); diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 64042fb1..047f8e03 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -25,6 +25,7 @@ use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; +use qovery_engine::cloud_provider::environment::Environment; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use 
qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::application::ScwZone; @@ -70,18 +71,27 @@ pub trait Cluster { } pub trait Infrastructure { + fn build_environment( + &self, + environment: &EnvironmentRequest, + logger: Box, + engine_config: &EngineConfig, + ) -> (Environment, TransactionResult); + fn deploy_environment( &self, environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; + fn pause_environment( &self, environment: &EnvironmentRequest, logger: Box, engine_config: &EngineConfig, ) -> TransactionResult; + fn delete_environment( &self, environment: &EnvironmentRequest, @@ -91,6 +101,33 @@ pub trait Infrastructure { } impl Infrastructure for EnvironmentRequest { + fn build_environment( + &self, + environment: &EnvironmentRequest, + logger: Box, + engine_config: &EngineConfig, + ) -> (Environment, TransactionResult) { + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = environment.to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ); + + let env = Rc::new(RefCell::new(env)); + let _ = tx.build_environment( + &env, + DeploymentOption { + force_build: true, + force_push: true, + }, + ); + + let ret = tx.commit(); + (Rc::try_unwrap(env).ok().unwrap().into_inner(), ret) + } + fn deploy_environment( &self, environment: &EnvironmentRequest, diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 67189f37..c14c00ef 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -16,9 +16,56 @@ use test_utilities::aws::aws_default_engine_config; use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; -// TODO: -// - Tests that applications are always restarted when receiving a CREATE action -// see: 
https://github.com/Qovery/engine/pull/269 +#[cfg(feature = "test-aws-self-hosted")] +#[named] +#[test] +fn aws_test_build_phase() { + // This test tries to run up to the build phase of the engine + // basically building and pushing each applications + let test_name = function_name!(); + engine_run_test(|| { + init(); + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + + let logger = logger(); + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .AWS_TEST_ORGANIZATION_ID + .as_ref() + .expect("AWS_TEST_ORGANIZATION_ID is not set") + .as_str(), + secrets + .AWS_TEST_CLUSTER_ID + .as_ref() + .expect("AWS_TEST_CLUSTER_ID is not set") + .as_str(), + ); + let engine_config = aws_default_engine_config(&context, logger.clone()); + let mut environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + environment.routers = vec![]; + let ea = environment.clone(); + + let (env, ret) = environment.build_environment(&ea, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check the the image exist in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} #[cfg(feature = "test-aws-self-hosted")] #[named] diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index b3df0295..0d036a95 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -20,6 +20,54 @@ use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on DigitalOcean infrastructure. // This cluster should be live in order to have those tests passing properly. 
+#[cfg(feature = "test-do-self-hosted")] +#[named] +#[test] +fn digitalocean_test_build_phase() { + let test_name = function_name!(); + engine_run_test(|| { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + + let secrets = FuncTestsSecrets::new(); + let logger = logger(); + let context = context( + secrets + .DIGITAL_OCEAN_TEST_ORGANIZATION_ID + .as_ref() + .expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"), + secrets + .DIGITAL_OCEAN_TEST_CLUSTER_ID + .as_ref() + .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), + ); + let engine_config = do_default_engine_config(&context, logger.clone()); + let environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .as_ref() + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + let env_action = environment.clone(); + + let (env, ret) = environment.build_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check the the image exist in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} + #[cfg(feature = "test-do-self-hosted")] #[named] #[test] diff --git a/tests/lib.rs b/tests/lib.rs index 1d73348f..bbc13eb3 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -1,6 +1,6 @@ #[macro_use] extern crate maplit; + mod aws; mod digitalocean; mod scaleway; -mod unit; diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 68114006..ae788b39 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -19,6 +19,56 @@ use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on Scaleway infrastructure. // This cluster should be live in order to have those tests passing properly. 
+#[cfg(feature = "test-scw-self-hosted")] +#[named] +#[test] +fn scaleway_test_build_phase() { + let test_name = function_name!(); + engine_run_test(|| { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + + let logger = logger(); + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .SCALEWAY_TEST_ORGANIZATION_ID + .as_ref() + .expect("SCALEWAY_TEST_ORGANIZATION_ID") + .as_str(), + secrets + .SCALEWAY_TEST_CLUSTER_ID + .as_ref() + .expect("SCALEWAY_TEST_CLUSTER_ID") + .as_str(), + ); + let engine_config = scw_default_engine_config(&context, logger.clone()); + let environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .as_ref() + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + let env_action = environment.clone(); + + let (env, ret) = environment.build_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check the the image exist in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} + #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] diff --git a/tests/unit/mod.rs b/tests/unit/mod.rs deleted file mode 100644 index 8b137891..00000000 --- a/tests/unit/mod.rs +++ /dev/null @@ -1 +0,0 @@ - From ef8636830a6b3e4d84633b88da23e27509992b8b Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 25 Mar 2022 16:29:31 +0100 Subject: [PATCH 72/85] Add minimal phase tests --- Cargo.toml | 13 ++++++++++--- tests/aws/aws_environment.rs | 4 ++-- tests/digitalocean/do_environment.rs | 4 ++-- tests/scaleway/scw_environment.rs | 4 ++-- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4c46c5cf..7f5b924b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,13 @@ tracing-test = "0.1.0" 
[features] default = [] -test-all = ["test-all-self-hosted", "test-all-infra", "test-all-managed-services", "test-all-whole-enchilada"] +test-all = ["test-all-minimal", "test-all-self-hosted", "test-all-infra", "test-all-managed-services", "test-all-whole-enchilada"] + +# Minimal depencies test (i.e: build, deploy nothing managed) +test-aws-minimal = [] +test-do-minimal = [] +test-scw-minimal = [] +test-all-minimal = ["test-aws-minimal", "test-do-minimal", "test-scw-minimal"] # functionnal tests by type test-aws-self-hosted = [] @@ -109,5 +115,6 @@ test-do-all = ["test-do-infra", "test-do-managed-services", "test-do-self-hosted test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-hosted", "test-scw-whole-enchilada"] # functionnal test with only a k8s cluster as a dependency -test-with-kube = [] -test-with-docker = [] +test-local-kube = [] +test-local-docker = [] +test-all-local = ["test-local-kube", "test-local-docker"] diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index c14c00ef..c90ae51b 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -16,7 +16,7 @@ use test_utilities::aws::aws_default_engine_config; use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; -#[cfg(feature = "test-aws-self-hosted")] +#[cfg(feature = "test-aws-minimal")] #[named] #[test] fn aws_test_build_phase() { @@ -67,7 +67,7 @@ fn aws_test_build_phase() { }) } -#[cfg(feature = "test-aws-self-hosted")] +#[cfg(feature = "test-aws-minimal")] #[named] #[test] fn deploy_a_working_environment_with_no_router_on_aws_eks() { diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 0d036a95..a15cf579 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -20,7 +20,7 @@ use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on DigitalOcean infrastructure. 
// This cluster should be live in order to have those tests passing properly. -#[cfg(feature = "test-do-self-hosted")] +#[cfg(feature = "test-do-minimal")] #[named] #[test] fn digitalocean_test_build_phase() { @@ -68,7 +68,7 @@ fn digitalocean_test_build_phase() { }) } -#[cfg(feature = "test-do-self-hosted")] +#[cfg(feature = "test-do-minimal")] #[named] #[test] fn digitalocean_doks_deploy_a_working_environment_with_no_router() { diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index ae788b39..ca0930a3 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -19,7 +19,7 @@ use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on Scaleway infrastructure. // This cluster should be live in order to have those tests passing properly. -#[cfg(feature = "test-scw-self-hosted")] +#[cfg(feature = "test-scw-minimal")] #[named] #[test] fn scaleway_test_build_phase() { @@ -69,7 +69,7 @@ fn scaleway_test_build_phase() { }) } -#[cfg(feature = "test-scw-self-hosted")] +#[cfg(feature = "test-scw-minimal")] #[named] #[test] fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { From 69b9d9d50c5f7e3bd1702bedafba26288896a60b Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 25 Mar 2022 16:34:38 +0100 Subject: [PATCH 73/85] feat: create DNS providers errors types (#655) --- src/dns_provider/cloudflare.rs | 15 +++------------ src/dns_provider/errors.rs | 7 +++++++ src/dns_provider/mod.rs | 22 +++++----------------- src/engine.rs | 3 ++- 4 files changed, 17 insertions(+), 30 deletions(-) create mode 100644 src/dns_provider/errors.rs diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 134c7b7a..48e8939c 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -1,8 +1,7 @@ use std::net::Ipv4Addr; +use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::{DnsProvider, Kind}; -use 
crate::errors::EngineError; -use crate::events::{ToTransmitter, Transmitter}; use crate::models::{Context, Domain}; pub struct Cloudflare { @@ -71,19 +70,11 @@ impl DnsProvider for Cloudflare { vec![Ipv4Addr::new(1, 1, 1, 1), Ipv4Addr::new(1, 0, 0, 1)] } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), DnsProviderError> { if self.cloudflare_api_token.is_empty() || self.cloudflare_email.is_empty() { - Err(EngineError::new_client_invalid_cloud_provider_credentials( - self.get_event_details(), - )) + Err(DnsProviderError::InvalidCredentials) } else { Ok(()) } } } - -impl ToTransmitter for Cloudflare { - fn to_transmitter(&self) -> Transmitter { - Transmitter::DnsProvider(self.id().to_string(), self.name().to_string()) - } -} diff --git a/src/dns_provider/errors.rs b/src/dns_provider/errors.rs new file mode 100644 index 00000000..8d97bedd --- /dev/null +++ b/src/dns_provider/errors.rs @@ -0,0 +1,7 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum DnsProviderError { + #[error("Invalid credentials error.")] + InvalidCredentials, +} diff --git a/src/dns_provider/mod.rs b/src/dns_provider/mod.rs index c8233c78..ce52fa81 100644 --- a/src/dns_provider/mod.rs +++ b/src/dns_provider/mod.rs @@ -1,14 +1,14 @@ use std::net::Ipv4Addr; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; +use crate::dns_provider::errors::DnsProviderError; use serde::{Deserialize, Serialize}; -use crate::models::{Context, Domain, QoveryIdentifier}; +use crate::models::{Context, Domain}; pub mod cloudflare; +pub mod errors; -pub trait DnsProvider: ToTransmitter { +pub trait DnsProvider { fn context(&self) -> &Context; fn provider_name(&self) -> &str; fn kind(&self) -> Kind; @@ -21,19 +21,7 @@ pub trait DnsProvider: ToTransmitter { fn token(&self) -> &str; fn domain(&self) -> &Domain; fn resolvers(&self) -> Vec; - fn is_valid(&self) -> Result<(), EngineError>; - fn 
get_event_details(&self) -> EventDetails { - let context = self.context(); - EventDetails::new( - None, - QoveryIdentifier::from(context.organization_id().to_string()), - QoveryIdentifier::from(context.cluster_id().to_string()), - QoveryIdentifier::from(context.execution_id().to_string()), - None, - Stage::Environment(EnvironmentStep::Deploy), - self.to_transmitter(), - ) - } + fn is_valid(&self) -> Result<(), DnsProviderError>; } #[derive(Serialize, Deserialize, Clone, Debug)] diff --git a/src/engine.rs b/src/engine.rs index 58a11192..a4c22bfe 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -7,6 +7,7 @@ use crate::cloud_provider::kubernetes::Kubernetes; use crate::cloud_provider::CloudProvider; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::ContainerRegistry; +use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::DnsProvider; use crate::errors::EngineError; use crate::models::Context; @@ -20,7 +21,7 @@ pub enum EngineConfigError { #[error("Cloud provider is not valid error: {0}")] CloudProviderNotValid(EngineError), #[error("DNS provider is not valid error: {0}")] - DnsProviderNotValid(EngineError), + DnsProviderNotValid(DnsProviderError), #[error("Kubernetes is not valid error: {0}")] KubernetesNotValid(EngineError), } From a46a7e42f6a636de8d9304799f3f287e7a711eee Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 25 Mar 2022 16:37:13 +0100 Subject: [PATCH 74/85] refactor: remove deprecated log param (#665) --- src/build_platform/local_docker.rs | 91 ++- src/cloud_provider/aws/kubernetes/mod.rs | 614 +++++++---------- src/cloud_provider/aws/router.rs | 45 +- .../digitalocean/kubernetes/mod.rs | 499 +++++--------- src/cloud_provider/digitalocean/router.rs | 45 +- src/cloud_provider/kubernetes.rs | 158 ++--- src/cloud_provider/scaleway/kubernetes/mod.rs | 639 +++++++----------- src/cloud_provider/scaleway/router.rs | 21 +- src/cloud_provider/service.rs | 173 ++--- 
src/cloud_provider/utilities.rs | 44 +- src/events/io.rs | 63 -- src/events/mod.rs | 37 - src/logger.rs | 45 +- src/transaction.rs | 24 +- 14 files changed, 910 insertions(+), 1588 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index e627f123..8f0bf51a 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -17,7 +17,7 @@ use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; @@ -62,15 +62,10 @@ impl LocalDocker { fn reclaim_space_if_needed(&self) { if env::var_os("CI").is_some() { - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - self.get_event_details(), - EventMessage::new_from_safe( - "CI environment variable found, no docker prune will be made".to_string(), - ), - ), - ); + self.logger.log(EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("CI environment variable found, no docker prune will be made".to_string()), + )); return; } @@ -92,10 +87,10 @@ impl LocalDocker { event_details.clone(), &*self.logger(), ) { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new(e.to_string(), Some(e.to_string()))), - ); + self.logger.log(EngineEvent::Warning( + event_details, + EventMessage::new(e.to_string(), Some(e.to_string())), + )); } break; }; @@ -114,10 +109,10 @@ impl LocalDocker { let log_info = { let app_id = build.image.application_id.clone(); move |msg: String| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(msg.clone())), - ); + self.logger.log(EngineEvent::Info( + self.get_event_details(), + 
EventMessage::new_from_safe(msg.clone()), + )); lh.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: app_id.clone() }, @@ -276,10 +271,10 @@ impl LocalDocker { let cmd_killer = CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled); exit_status = cmd.exec_with_abort( &mut |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); + self.logger.log(EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe(line.to_string()), + )); lh.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -291,10 +286,10 @@ impl LocalDocker { )); }, &mut |line| { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); + self.logger.log(EngineEvent::Warning( + self.get_event_details(), + EventMessage::new_from_safe(line.to_string()), + )); lh.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { @@ -376,10 +371,8 @@ impl BuildPlatform for LocalDocker { Some(msg.clone()), self.context.execution_id(), )); - self.logger.log( - LogLevel::Info, - EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), - ); + self.logger + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))); // LOGGING // Create callback that will be called by git to provide credentials per user @@ -524,32 +517,26 @@ fn check_docker_space_usage_and_clean( let docker_percentage_remaining = available_space * 100 / docker_path_size_info.get_total_space(); if docker_percentage_remaining < docker_max_disk_percentage_usage_before_purge || available_space == 0 { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details, - EventMessage::new_from_safe(format!( - "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", - docker_percentage_remaining, 
docker_max_disk_percentage_usage_before_purge - )), - ), - ); + logger.log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe(format!( + "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", + docker_percentage_remaining, docker_max_disk_percentage_usage_before_purge + )), + )); return docker.prune_images(); }; - logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details, - EventMessage::new_from_safe(format!( - "No need to purge old docker images, only {}% ({}/{}) disk used", - 100 - docker_percentage_remaining, - docker_path_size_info.get_available_space(), - docker_path_size_info.get_total_space(), - )), - ), - ); + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe(format!( + "No need to purge old docker images, only {}% ({}/{}) disk used", + 100 - docker_percentage_remaining, + docker_path_size_info.get_available_space(), + docker_path_size_info.get_total_space(), + )), + )); Ok(()) } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 12243714..c79927fb 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -38,7 +38,7 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString, @@ -184,7 +184,7 @@ impl EKS { let err = EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -273,13 +273,10 @@ impl EKS { 
event_details: EventDetails, replicas_count: u32, ) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), + )); let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let selector = "cluster-autoscaler-aws-cluster-autoscaler"; let namespace = "kube-system"; @@ -443,16 +440,13 @@ impl EKS { match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details, - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { @@ -557,13 +551,10 @@ impl EKS { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); let listeners_helper = ListenersHelper::new(&self.listeners); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), + )); self.send_to_customer( format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -583,28 +574,18 @@ impl EKS { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not 
required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), + )))); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -615,23 +596,17 @@ impl EKS { self.cloud_provider.access_key_id().as_str(), self.cloud_provider.secret_access_key().as_str(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Role {} is already present, no need to create", - role.role_name - )), - ), - ), - Err(e) => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), - None, - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Role {} is already present, no need to create", + role.role_name + )), + )), + Err(e) => self.logger().log(EngineEvent::Error( + 
EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), + None, + )), } } @@ -667,13 +642,10 @@ impl EKS { )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), + )); self.send_to_customer( format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -687,13 +659,10 @@ impl EKS { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( event_details, @@ -706,10 +675,10 @@ impl EKS { } } } - Err(e) => self.logger().log( - LogLevel::Warning, - EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; // terraform deployment dedicated to cloud resources @@ -756,13 +725,10 @@ impl EKS { disable_pleco: self.context.disable_pleco(), }; - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + 
)); let helm_charts_to_deploy = aws_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, @@ -786,39 +752,29 @@ impl EKS { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("EKS.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("EKS.create_error() called.".to_string()), + )); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details, - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -828,13 +784,10 @@ impl EKS { } fn 
downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -848,13 +801,10 @@ impl EKS { &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -906,8 +856,7 @@ impl EKS { } Err(e) => { let error = EngineError::new_terraform_state_does_not_exist(event_details, e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; @@ -960,7 +909,7 @@ impl EKS { match wait_engine_job_finish { Ok(_) => { - self.logger().log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. 
}) => { return Err(error) @@ -970,7 +919,7 @@ impl EKS { } } } - false => self.logger().log(LogLevel::Warning, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("The Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), } } @@ -984,22 +933,17 @@ impl EKS { format!("Pausing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - event_details.clone(), - EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), + )); match terraform_exec(temp_dir.as_str(), terraform_args) { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), @@ -1007,13 +951,10 @@ impl EKS { } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + 
self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), + )); Ok(()) } @@ -1027,13 +968,10 @@ impl EKS { format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1071,13 +1009,10 @@ impl EKS { Ok(x) => x, Err(e) => { let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(safe_message.to_string(), Some(e.message())), + )); skip_kubernetes_step = true; "".to_string() @@ -1092,27 +1027,19 @@ impl EKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred 
during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; if !skip_kubernetes_step { @@ -1122,10 +1049,10 @@ impl EKS { self.name(), self.id() ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( @@ -1138,13 +1065,10 @@ impl EKS { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( @@ -1152,28 +1076,22 @@ impl EKS { namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + 
namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -1184,13 +1102,10 @@ impl EKS { "Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1200,10 +1115,8 @@ impl EKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new( @@ -1223,13 +1136,10 @@ impl EKS { self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { @@ -1240,34 +1150,25 @@ impl EKS { for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => 
self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( @@ -1276,90 +1177,64 @@ impl EKS { self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete 
namespace {}.", qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + 
.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1372,13 +1247,10 @@ impl EKS { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details, - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); Ok(()) } Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( @@ -1393,13 +1265,10 @@ impl EKS { } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), + )); Ok(()) } @@ -1507,13 +1376,10 @@ impl Kubernetes for EKS { .as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1529,13 +1395,10 @@ impl Kubernetes for EKS { format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), + )); // AWS requires the upgrade to be done in 2 steps (masters, then workers) // use the current kubernetes masters' version for workers, in order to avoid migration in one step @@ -1580,13 +1443,10 @@ impl Kubernetes for EKS { format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - 
event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => { @@ -1594,15 +1454,12 @@ impl Kubernetes for EKS { format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes master nodes have been successfully upgraded.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes master nodes have been successfully upgraded.".to_string(), ), - ); + )); } Err(e) => { return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); @@ -1610,26 +1467,20 @@ impl Kubernetes for EKS { } } Some(KubernetesNodesType::Workers) => { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), ), - ); + )); } None => { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details, - EventMessage::new_from_safe( - "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), ), - ); + )); return Ok(()); } } @@ -1641,7 +1492,7 @@ impl 
Kubernetes for EKS { self.cloud_provider().credentials_environment_variables(), Stage::Infrastructure(InfrastructureStep::Upgrade), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1652,13 +1503,10 @@ impl Kubernetes for EKS { format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); // disable cluster autoscaler to avoid interfering with AWS upgrade procedure context.insert("enable_cluster_autoscaler", &false); @@ -1699,13 +1547,10 @@ impl Kubernetes for EKS { format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), + )); // Disable cluster autoscaler deployment let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; @@ -1716,15 +1561,12 @@ impl Kubernetes for EKS { format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes workers nodes have been successfully upgraded.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + 
EventMessage::new_from_safe( + "Kubernetes workers nodes have been successfully upgraded.".to_string(), ), - ); + )); } Err(e) => { // enable cluster autoscaler deployment diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index 656b4d2e..940cee44 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -12,7 +12,7 @@ use crate::cmd::helm; use crate::cmd::helm::{to_engine_error, Timeout}; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; @@ -192,27 +192,21 @@ impl Service for RouterAws { Some(hostname) => context.insert("external_ingress_hostname_default", hostname.as_str()), None => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details, - EventMessage::new_from_safe( - "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), - ), + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe( + "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), - ); + )); } }, _ => { // FIXME really? 
// TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details, - EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), + )); } } @@ -386,19 +380,16 @@ impl Create for RouterAws { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 85256d82..55fa89e2 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -40,7 +40,7 @@ use crate::events::Stage::Infrastructure; use crate::events::{ EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, }; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, ToHelmString, @@ -140,7 +140,7 @@ impl DOKS { e, ); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -356,16 +356,13 @@ impl DOKS { match 
env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details, - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { @@ -463,13 +460,10 @@ impl DOKS { )), self.context.execution_id(), )); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing DOKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing DOKS cluster deployment.".to_string()), + )); // upgrade cluster instead if required match self.get_kubeconfig_file() { @@ -485,28 +479,22 @@ impl DOKS { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), + self.logger().log(EngineEvent::Error(e, None)); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), ), - ); + )); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), 
EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -542,13 +530,10 @@ impl DOKS { )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying DOKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying DOKS cluster.".to_string()), + )); self.send_to_customer( format!("Deploying DOKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -562,13 +547,10 @@ impl DOKS { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( event_details, @@ -581,18 +563,17 @@ impl DOKS { } } } - Err(e) => self.logger().log( - LogLevel::Warning, - EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { let error = EngineError::new_object_storage_cannot_create_bucket_error(event_details, 
self.logs_bucket_name(), e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -610,13 +591,10 @@ impl DOKS { format!("Kubernetes {} nodes have been successfully created", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), + )) } Err(e) => { return Err(EngineError::new_k8s_node_not_ready(event_details, e)); @@ -668,13 +646,10 @@ impl DOKS { let chart_prefix_path = &temp_dir; - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); let helm_charts_to_deploy = do_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, @@ -761,39 +736,29 @@ impl DOKS { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("DOKS.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("DOKS.create_error() called.".to_string()), + )); match kubectl_exec_get_events(kubeconfig_path, None, 
environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new(ok_line, None)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details, - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Warning(event_details, EventMessage::new(ok_line, None))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("DOKS.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("DOKS.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -803,13 +768,10 @@ impl DOKS { } fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("DOKS.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("DOKS.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -819,13 +781,10 @@ impl DOKS { } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("DOKS.pause_error() called.".to_string()), - ), - 
); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("DOKS.pause_error() called.".to_string()), + )); Ok(()) } @@ -838,13 +797,10 @@ impl DOKS { format!("Preparing to delete DOKS cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete DOKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing to delete DOKS cluster.".to_string()), + )); let temp_dir = match self.get_temp_dir(event_details.clone()) { Ok(dir) => dir, @@ -890,27 +846,19 @@ impl DOKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; let 
kubeconfig_path = &self.get_kubeconfig_file_path()?; @@ -923,10 +871,10 @@ impl DOKS { self.name(), self.id() ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( @@ -939,13 +887,10 @@ impl DOKS { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( @@ -953,28 +898,22 @@ impl DOKS { namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + 
"Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -985,13 +924,10 @@ impl DOKS { "Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1001,10 +937,8 @@ impl DOKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) @@ -1021,13 +955,10 @@ impl DOKS { self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { @@ -1038,34 +969,25 @@ impl DOKS { for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), 
Err(e) => { let message_safe = format!("Can't delete chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( @@ -1074,90 +996,64 @@ impl DOKS { self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + 
EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Error deleting chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + 
EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1170,13 +1066,10 @@ impl DOKS { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details, - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); Ok(()) } Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( @@ -1191,13 +1084,10 @@ impl DOKS { } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("DOKS.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("DOKS.delete_error() called.".to_string()), + )); Ok(()) } @@ -1279,16 +1169,13 @@ impl Kubernetes for DOKS { match File::open(&local_kubeconfig_generated) { Ok(_) => Some(local_kubeconfig_generated), Err(err) => { - self.logger().log( - LogLevel::Debug, - EngineEvent::Debug( - self.get_event_details(stage), - EventMessage::new( - err.to_string(), - Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), - ), + self.logger().log(EngineEvent::Debug( + self.get_event_details(stage), + EventMessage::new( + err.to_string(), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), ), - ); + )); None } } @@ -1427,13 +1314,10 @@ impl Kubernetes for DOKS { .as_str(), 
&listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing DOKS cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing DOKS cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1447,7 +1331,7 @@ impl Kubernetes for DOKS { self.cloud_provider().credentials_environment_variables(), event_details.stage().clone(), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1458,13 +1342,10 @@ impl Kubernetes for DOKS { format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); let upgrade_doks_version = match get_do_latest_doks_slug_from_api(self.cloud_provider.token(), self.version()) { Ok(version) => match version { @@ -1519,13 +1400,10 @@ impl Kubernetes for DOKS { format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => match 
self.check_workers_on_upgrade(kubernetes_upgrade_status.requested_version.to_string()) { @@ -1534,15 +1412,10 @@ impl Kubernetes for DOKS { format!("Kubernetes {} nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details, - EventMessage::new_from_safe( - "Kubernetes nodes have been successfully upgraded.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes nodes have been successfully upgraded.".to_string()), + )); } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index 83d769fc..cd9662cc 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -12,7 +12,7 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; @@ -202,27 +202,21 @@ impl Service for RouterDo { Some(hostname) => context.insert("external_ingress_hostname_default", hostname.as_str()), None => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details, - EventMessage::new_from_safe( - "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), - ), + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe( + "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), - ); + )); } }, _ => { // FIXME really? 
// TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details, - EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), + )); } } @@ -396,19 +390,16 @@ impl Create for RouterDo { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 7dfc441d..86d2f166 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -31,7 +31,7 @@ use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter}; use crate::fs::workspace_directory; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::ProgressLevel::Info; use crate::models::{ Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, @@ -97,16 +97,13 @@ pub trait Kubernetes: Listen { match File::open(&local_kubeconfig_generated) { Ok(_) => Some(local_kubeconfig_generated), Err(err) => { - self.logger().log( - LogLevel::Debug, - EngineEvent::Debug( - 
self.get_event_details(stage.clone()), - EventMessage::new( - err.to_string(), - Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), - ), + self.logger().log(EngineEvent::Debug( + self.get_event_details(stage.clone()), + EventMessage::new( + err.to_string(), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), ), - ); + )); None } } @@ -136,8 +133,7 @@ pub trait Kubernetes: Listen { self.get_event_details(stage), err.into(), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } } @@ -151,8 +147,7 @@ pub trait Kubernetes: Listen { self.get_event_details(stage), CommandError::new_from_safe_message(format!("Error getting file metadata, error: {}", err,)), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; @@ -164,8 +159,7 @@ pub trait Kubernetes: Listen { self.get_event_details(stage), CommandError::new_from_safe_message(format!("Error setting file permissions, error: {}", err,)), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -195,8 +189,7 @@ pub trait Kubernetes: Listen { )), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -790,20 +783,17 @@ where for object in cert_manager_objects { // check resource exist first if let Err(e) = kubectl_exec_count_all_objects(&kubernetes_config, object, envs.clone()) { - logger.log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new( - format!( - "Encountering issues while trying to get objects kind {}: {:?}", - object, - e.message() - ), - None, + 
logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Encountering issues while trying to get objects kind {}: {:?}", + object, + e.message() ), + None, ), - ); + )); continue; } @@ -813,13 +803,10 @@ where || match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { Ok(_) => OperationResult::Ok(()), Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(format!("Failed to delete all {} objects, retrying...", object,), None), - ), - ); + logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Failed to delete all {} objects, retrying...", object,), None), + )); OperationResult::Retry(e) } }, @@ -1052,10 +1039,7 @@ fn check_kubernetes_upgrade_status( match compare_kubernetes_cluster_versions_for_upgrade(&deployed_masters_version, &wished_version) { Ok(x) => { if let Some(msg) = x.message { - logger.log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(msg)), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg))); }; if x.older_version_detected { older_masters_version_detected = x.older_version_detected; @@ -1078,15 +1062,12 @@ fn check_kubernetes_upgrade_status( // check workers versions if deployed_workers_version.is_empty() { - logger.log( - LogLevel::Warning, - EngineEvent::Deploying( - event_details, - EventMessage::new_from_safe( - "No worker nodes found, can't check if upgrade is required for workers".to_string(), - ), + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No worker nodes found, can't check if upgrade is required for workers".to_string(), ), - ); + )); return Ok(KubernetesUpgradeStatus { required_upgrade_on, @@ -1130,22 +1111,19 @@ fn check_kubernetes_upgrade_status( } } - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details, - 
EventMessage::new_from_safe(match &required_upgrade_on { - None => "All workers are up to date, no upgrade required".to_string(), - Some(node_type) => match node_type { - KubernetesNodesType::Masters => "Kubernetes master upgrade required".to_string(), - KubernetesNodesType::Workers => format!( - "Kubernetes workers upgrade required, need to update {}/{} nodes", - non_up_to_date_workers, total_workers - ), - }, - }), - ), - ); + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe(match &required_upgrade_on { + None => "All workers are up to date, no upgrade required".to_string(), + Some(node_type) => match node_type { + KubernetesNodesType::Masters => "Kubernetes master upgrade required".to_string(), + KubernetesNodesType::Workers => format!( + "Kubernetes workers upgrade required, need to update {}/{} nodes", + non_up_to_date_workers, total_workers + ), + }, + }), + )); Ok(KubernetesUpgradeStatus { required_upgrade_on, @@ -1330,42 +1308,33 @@ where match action { Action::Create => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Create), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Create), ), - ); + event_message, + )); } Action::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing( - EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Pause), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Pause), ), - ); + event_message, + )); } Action::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - 
EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Delete), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Delete), ), - ); + event_message, + )); } Action::Nothing => {} // should not happens }; @@ -1414,10 +1383,7 @@ pub fn validate_k8s_required_cpu_and_burstable( context_id, )); - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(message)), - ); + logger.log(EngineEvent::Warning(event_details, EventMessage::new_from_safe(message))); set_cpu_burst = total_cpu.clone(); } diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 9de13154..960c0bce 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -23,7 +23,7 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, }; @@ -173,7 +173,7 @@ impl Kapsule { e, ); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -388,20 +388,17 @@ impl Kapsule { fn check_missing_nodegroup_info(&self, item: &Option, name: &str) -> Result<(), ScwNodeGroupErrors> { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_workers_group_info_error( - event_details, - CommandError::new_from_safe_message(format!( - "Missing node 
pool info {} for cluster {}", - name, - self.context.cluster_id() - )), - ), - None, + self.logger.log(EngineEvent::Error( + EngineError::new_missing_workers_group_info_error( + event_details, + CommandError::new_from_safe_message(format!( + "Missing node pool info {} for cluster {}", + name, + self.context.cluster_id() + )), ), - ); + None, + )); if item.is_none() { return Err(ScwNodeGroupErrors::MissingNodePoolInfo); @@ -528,16 +525,13 @@ impl Kapsule { match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details, - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { @@ -587,13 +581,10 @@ impl Kapsule { format!("Preparing SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing SCW cluster deployment.".to_string()), - ), - ); + self.logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing SCW cluster deployment.".to_string()), + )); // upgrade cluster instead if required match self.get_kubeconfig_file() { @@ -609,28 +600,18 @@ impl Kapsule { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - 
self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), + )))); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -666,13 +647,10 @@ impl Kapsule { )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying SCW cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying SCW cluster.".to_string()), + )); self.send_to_customer( format!("Deploying SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), @@ -687,13 +665,10 @@ impl Kapsule { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( event_details, @@ -706,21 +681,18 @@ impl Kapsule { } } } - Err(e) => 
self.logger().log( - LogLevel::Warning, - EngineEvent::Error(EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), None), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; // TODO(benjaminch): move this elsewhere // Create object-storage buckets - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), + )); if let Err(e) = self .object_storage .create_bucket(self.kubeconfig_bucket_name().as_str()) @@ -730,8 +702,7 @@ impl Kapsule { self.kubeconfig_bucket_name(), e, ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -739,8 +710,7 @@ impl Kapsule { if let Err(e) = self.object_storage.create_bucket(self.logs_bucket_name().as_str()) { let error = EngineError::new_object_storage_cannot_create_bucket_error(event_details, self.logs_bucket_name(), e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -764,8 +734,7 @@ impl Kapsule { kubeconfig_name.to_string(), e, ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -790,15 +759,12 @@ impl Kapsule { Some(c), )) } - ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "cluster do not exists, no node groups can be retrieved for upgrade check".to_string(), - ), + 
ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe( + "cluster do not exists, no node groups can be retrieved for upgrade check".to_string(), ), - ), + )), ScwNodeGroupErrors::MultipleClusterFound => { let msg = "multiple clusters found, can't match the correct node groups".to_string(); return Err(EngineError::new_multiple_cluster_found_expected_one_error( @@ -806,15 +772,12 @@ impl Kapsule { CommandError::new(msg.clone(), Some(msg)), )); } - ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "cluster exists, but no node groups found for upgrade check".to_string(), - ), + ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe( + "cluster exists, but no node groups found for upgrade check".to_string(), ), - ), + )), ScwNodeGroupErrors::MissingNodePoolInfo => { let msg = "Error with Scaleway API while trying to retrieve node pool info".to_string(); return Err(EngineError::new_missing_api_info_from_cloud_provider_error( @@ -834,33 +797,27 @@ impl Kapsule { }; // ensure all node groups are in ready state Scaleway side - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "ensuring all groups nodes are in ready state from the Scaleway API".to_string(), - ), + self.logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "ensuring all groups nodes are in ready state from the Scaleway API".to_string(), ), - ); + )); for ng in current_nodegroups { let res = retry::retry( // retry 10 min max per nodegroup until they are ready Fixed::from_millis(15000).take(40), || { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - 
"checking node group {}/{:?}, current status: {:?}", - &ng.name, - &ng.id.as_ref().unwrap_or(&"unknown".to_string()), - &ng.status - )), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "checking node group {}/{:?}, current status: {:?}", + &ng.name, + &ng.id.as_ref().unwrap_or(&"unknown".to_string()), + &ng.status + )), + )); let pool_id = match &ng.id { None => { let msg = @@ -883,15 +840,13 @@ impl Kapsule { event_details.clone(), Some(c), ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } ScwNodeGroupErrors::ClusterDoesNotExists(c) => { let current_error = EngineError::new_no_cluster_found_error(event_details.clone(), c); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } ScwNodeGroupErrors::MultipleClusterFound => { @@ -914,8 +869,7 @@ impl Kapsule { event_details.clone(), Some(c), ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } } @@ -944,15 +898,12 @@ impl Kapsule { } } } - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "all node groups for this cluster are ready from cloud provider API".to_string(), - ), + self.logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "all node groups for this cluster are ready from cloud provider API".to_string(), ), - ); + )); // ensure all nodes are ready on Kubernetes match self.check_workers_on_create() { @@ -961,13 +912,10 @@ impl Kapsule { format!("Kubernetes {} nodes have been successfully created", 
self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), + )) } Err(e) => { return Err(EngineError::new_k8s_node_not_ready(event_details, e)); @@ -1009,13 +957,10 @@ impl Kapsule { self.options.clone(), ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); let helm_charts_to_deploy = scw_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, @@ -1039,39 +984,29 @@ impl Kapsule { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("SCW.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("SCW.create_error() called.".to_string()), + )); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details, EventMessage::new_from_safe(ok_line)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details, - EventMessage::new("Error trying to get 
kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(ok_line))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("SCW.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("SCW.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -1081,13 +1016,10 @@ impl Kapsule { } fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("SCW.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("SCW.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -1101,13 +1033,10 @@ impl Kapsule { &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing SCW cluster pause.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("Preparing SCW cluster pause.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1159,8 +1088,7 @@ impl Kapsule { } Err(e) => { let error = 
EngineError::new_terraform_state_does_not_exist(event_details, e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; @@ -1214,7 +1142,7 @@ impl Kapsule { match wait_engine_job_finish { Ok(_) => { - self.logger().log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. }) => { return Err(error) @@ -1224,7 +1152,7 @@ impl Kapsule { } } } - false => self.logger().log(LogLevel::Warning, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("The Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), } } @@ -1238,22 +1166,17 @@ impl Kapsule { format!("Pausing SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - event_details.clone(), - EventMessage::new_from_safe("Pausing SCW cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing SCW cluster deployment.".to_string()), + )); match terraform_exec(temp_dir.as_str(), terraform_args) { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details, EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), @@ -1261,13 +1184,10 @@ impl Kapsule { } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("SCW.pause_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("SCW.pause_error() called.".to_string()), + )); Ok(()) } @@ -1281,13 +1201,10 @@ impl Kapsule { format!("Preparing to delete SCW cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete SCW cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + 
event_details.clone(), + EventMessage::new_from_safe("Preparing to delete SCW cluster.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1329,27 +1246,19 @@ impl Kapsule { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; let kubeconfig_path = &self.get_kubeconfig_file_path()?; @@ -1362,10 +1271,10 @@ impl Kapsule { self.name(), self.id() ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( @@ -1378,13 +1287,10 @@ impl Kapsule { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); 
let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( @@ -1392,28 +1298,22 @@ impl Kapsule { namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -1424,13 +1324,10 @@ impl Kapsule { "Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1440,10 +1337,8 @@ impl Kapsule { self.id() ); self.send_to_customer(&message, &listeners_helper); - 
self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) @@ -1460,13 +1355,10 @@ impl Kapsule { self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { @@ -1477,34 +1369,25 @@ impl Kapsule { for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Can't delete chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + 
self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( @@ -1513,90 +1396,64 @@ impl Kapsule { self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + 
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Error deleting chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1609,13 +1466,10 @@ impl Kapsule { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details, - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + 
self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); Ok(()) } Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( @@ -1630,13 +1484,10 @@ impl Kapsule { } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("SCW.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("SCW.delete_error() called.".to_string()), + )); Ok(()) } @@ -1743,13 +1594,10 @@ impl Kubernetes for Kapsule { .as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing SCW cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing SCW cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1763,7 +1611,7 @@ impl Kubernetes for Kapsule { self.cloud_provider().credentials_environment_variables(), Stage::Infrastructure(InfrastructureStep::Upgrade), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1774,13 +1622,10 @@ impl Kubernetes for Kapsule { format!("Preparing nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + 
event_details.clone(), + EventMessage::new_from_safe("Preparing nodes for upgrade for Kubernetes cluster.".to_string()), + )); context.insert( "kubernetes_cluster_version", @@ -1817,13 +1662,10 @@ impl Kubernetes for Kapsule { format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => match self.check_workers_on_upgrade(kubernetes_upgrade_status.requested_version.to_string()) { @@ -1832,15 +1674,10 @@ impl Kubernetes for Kapsule { format!("Kubernetes {} nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details, - EventMessage::new_from_safe( - "Kubernetes nodes have been successfully upgraded.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes nodes have been successfully upgraded.".to_string()), + )); } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 3c62e19c..ee6a6e40 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -12,7 +12,7 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; 
@@ -340,19 +340,16 @@ impl Create for RouterScw { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 3332a852..216e69e1 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -21,7 +21,7 @@ use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replica use crate::cmd::structs::LabelsContent; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::ProgressLevel::Info; use crate::models::{ Context, DatabaseMode, Listen, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, @@ -310,17 +310,14 @@ where match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details) { Ok(lines) => lines, Err(err) => { - logger.log( - LogLevel::Error, - EngineEvent::Error( - err, - Some(EventMessage::new_from_safe(format!( - "error while retrieving debug logs from {} {}", - service.service_type().name(), - service.name_with_id(), - ))), - ), - ); + logger.log(EngineEvent::Error( + err, + Some(EventMessage::new_from_safe(format!( + "error while retrieving debug logs from {} {}", + service.service_type().name(), + service.name_with_id(), + ))), + )); Vec::new() } @@ -573,17 +570,14 @@ where let 
environment = target.environment; if service.is_managed_service() { - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Deploying managed {} `{}`", - service.service_type().name(), - service.name_with_id() - )), - ), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Deploying managed {} `{}`", + service.service_type().name(), + service.name_with_id() + )), + )); let context = service.tera_context(target)?; @@ -634,17 +628,14 @@ where .map_err(|e| EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e))?; } else { // use helm - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Deploying containerized {} `{}` on Kubernetes cluster", - service.service_type().name(), - service.name_with_id() - )), - ), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Deploying containerized {} `{}` on Kubernetes cluster", + service.service_type().name(), + service.name_with_id() + )), + )); let context = service.tera_context(target)?; let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; @@ -815,20 +806,17 @@ where match crate::cmd::terraform::terraform_init_validate_destroy(workspace_dir.as_str(), true) { Ok(_) => { - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - event_details, - EventMessage::new_from_safe("Deleting secret containing tfstates".to_string()), - ), - ); + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Deleting secret containing tfstates".to_string()), + )); let _ = delete_terraform_tfstate_secret(kubernetes, environment.namespace(), &get_tfstate_name(service)); } Err(e) => { let engine_err = EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, e); - logger.log(LogLevel::Error, 
EngineEvent::Error(engine_err.clone(), None)); + logger.log(EngineEvent::Error(engine_err.clone(), None)); return Err(engine_err); } @@ -892,10 +880,10 @@ where version.as_str() ); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -949,7 +937,7 @@ where service.version(), ); - logger.log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + logger.log(EngineEvent::Error(error.clone(), None)); Err(error) } @@ -1011,24 +999,15 @@ where match action { CheckAction::Deploy => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(message)), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } CheckAction::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } CheckAction::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } } @@ -1047,19 +1026,16 @@ where kubernetes.context().execution_id(), ); - logger.log( - LogLevel::Error, - EngineEvent::Error( - err.clone(), - Some(EventMessage::new_from_safe(format!( - "{} error with {} {} , id: {}", - action_verb, - service.service_type().name(), - service.name(), - service.id(), - ))), - ), - ); + logger.log(EngineEvent::Error( + err.clone(), + 
Some(EventMessage::new_from_safe(format!( + "{} error with {} {} , id: {}", + action_verb, + service.service_type().name(), + service.name(), + service.id(), + ))), + )); match action { CheckAction::Deploy => listeners_helper.deployment_error(progress_info), @@ -1081,10 +1057,10 @@ where kubernetes.context().execution_id(), ); - logger.log( - LogLevel::Debug, - EngineEvent::Debug(event_details.clone(), EventMessage::new_from_safe(debug_logs_string)), - ); + logger.log(EngineEvent::Debug( + event_details.clone(), + EventMessage::new_from_safe(debug_logs_string), + )); match action { CheckAction::Deploy => listeners_helper.deployment_error(progress_info), @@ -1315,42 +1291,33 @@ where match action { Action::Create => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Deploy), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Environment(EnvironmentStep::Deploy), ), - ); + event_message, + )); } Action::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Pause), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Environment(EnvironmentStep::Pause), ), - ); + event_message, + )); } Action::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Delete), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Environment(EnvironmentStep::Delete), ), - ); + event_message, + )); } Action::Nothing => {} // should 
not happens }; diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 87e0cb2b..28a20894 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventDetails, EventMessage}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use chrono::Duration; use core::option::Option::{None, Some}; @@ -455,10 +455,10 @@ pub fn check_domain_for( resolver }; - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); let fixed_iterable = Fixed::from_millis(3000).take(100); let check_result = retry::retry(fixed_iterable, || match next_resolver().lookup_ip(domain) { @@ -466,10 +466,10 @@ pub fn check_domain_for( Err(err) => { let x = format!("Domain resolution check for '{}' is still in progress...", domain); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(x.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(x.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -488,10 +488,10 @@ pub fn check_domain_for( Ok(_) => { let x = format!("Domain {} is ready! 
⚡️", domain); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -509,10 +509,10 @@ pub fn check_domain_for( domain ); - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -552,14 +552,8 @@ pub fn print_action( ) { let msg = format!("{}.{}.{} called for {}", cloud_provider_name, struct_name, fn_name, item_name); match fn_name.contains("error") { - true => logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg)), - ), - false => logger.log( - LogLevel::Info, - EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), - ), + true => logger.log(EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg))), + false => logger.log(EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))), } } diff --git a/src/events/io.rs b/src/events/io.rs index 503dc21d..9bf9693c 100644 --- a/src/events/io.rs +++ b/src/events/io.rs @@ -25,41 +25,6 @@ pub enum EngineEvent { error: EngineError, message: Option, }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Waiting { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deploying { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Pausing { - details: EventDetails, - message: EventMessage, - }, - 
#[deprecated(note = "event status is carried by EventDetails directly")] - Deleting { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deployed { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Paused { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deleted { - details: EventDetails, - message: EventMessage, - }, } impl From for EngineEvent { @@ -81,34 +46,6 @@ impl From for EngineEvent { error: EngineError::from(e), message: m.map(EventMessage::from), }, - events::EngineEvent::Waiting(d, m) => EngineEvent::Waiting { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deploying(d, m) => EngineEvent::Deploying { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Pausing(d, m) => EngineEvent::Pausing { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deleting(d, m) => EngineEvent::Deleting { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deployed(d, m) => EngineEvent::Deployed { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Paused(d, m) => EngineEvent::Paused { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deleted(d, m) => EngineEvent::Deleted { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, } } } diff --git a/src/events/mod.rs b/src/events/mod.rs index b043939e..eb5565f8 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -22,29 +22,6 @@ pub enum EngineEvent { Warning(EventDetails, EventMessage), /// Error: represents an error event. 
Error(EngineError, Option), - /// Waiting: represents an engine waiting event. - /// - /// Engine is waiting for a task to be done. - #[deprecated(note = "event status is carried by EventDetails directly")] - Waiting(EventDetails, EventMessage), - /// Deploying: represents an engine deploying event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deploying(EventDetails, EventMessage), - /// Pausing: represents an engine pausing event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Pausing(EventDetails, EventMessage), - /// Deleting: represents an engine deleting event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deleting(EventDetails, EventMessage), - /// Deployed: represents an engine deployed event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deployed(EventDetails, EventMessage), - /// Paused: represents an engine paused event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Paused(EventDetails, EventMessage), - /// Deleted: represents an engine deleted event. 
- #[deprecated(note = "event status is carried by EventDetails directly")] - Deleted(EventDetails, EventMessage), } impl EngineEvent { @@ -55,13 +32,6 @@ impl EngineEvent { EngineEvent::Info(details, _message) => details, EngineEvent::Warning(details, _message) => details, EngineEvent::Error(engine_error, _message) => engine_error.event_details(), - EngineEvent::Waiting(details, _message) => details, - EngineEvent::Deploying(details, _message) => details, - EngineEvent::Pausing(details, _message) => details, - EngineEvent::Deleting(details, _message) => details, - EngineEvent::Deployed(details, _message) => details, - EngineEvent::Paused(details, _message) => details, - EngineEvent::Deleted(details, _message) => details, } } @@ -72,13 +42,6 @@ impl EngineEvent { EngineEvent::Info(_details, message) => message.message(message_verbosity), EngineEvent::Warning(_details, message) => message.message(message_verbosity), EngineEvent::Error(engine_error, _message) => engine_error.message(), - EngineEvent::Waiting(_details, message) => message.message(message_verbosity), - EngineEvent::Deploying(_details, message) => message.message(message_verbosity), - EngineEvent::Pausing(_details, message) => message.message(message_verbosity), - EngineEvent::Deleting(_details, message) => message.message(message_verbosity), - EngineEvent::Deployed(_details, message) => message.message(message_verbosity), - EngineEvent::Paused(_details, message) => message.message(message_verbosity), - EngineEvent::Deleted(_details, message) => message.message(message_verbosity), } } } diff --git a/src/logger.rs b/src/logger.rs index 2d96b8d3..02f62444 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,16 +1,8 @@ use crate::events::{EngineEvent, EventMessageVerbosity}; use tracing; -#[derive(Debug, Clone)] -pub enum LogLevel { - Debug, - Info, - Warning, - Error, -} - pub trait Logger: Send + Sync { - fn log(&self, log_level: LogLevel, event: EngineEvent); + fn log(&self, event: EngineEvent); fn 
clone_dyn(&self) -> Box; } @@ -37,7 +29,7 @@ impl Default for StdIoLogger { } impl Logger for StdIoLogger { - fn log(&self, log_level: LogLevel, event: EngineEvent) { + fn log(&self, event: EngineEvent) { let event_details = event.get_details(); let stage = event_details.stage(); let execution_id = event_details.execution_id().to_string(); @@ -63,11 +55,11 @@ impl Logger for StdIoLogger { transmitter = event_details.transmitter().to_string().as_str(), ) .in_scope(|| { - match log_level { - LogLevel::Debug => debug!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Info => info!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Warning => warn!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Error => error!("{}", event.message(EventMessageVerbosity::FullDetails)), + match event { + EngineEvent::Debug(_, _) => debug!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Info(_, _) => info!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Warning(_, _) => warn!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Error(_, _) => error!("{}", event.message(EventMessageVerbosity::FullDetails)), }; }); } @@ -91,7 +83,6 @@ mod tests { use uuid::Uuid; struct TestCase<'a> { - log_level: LogLevel, event: EngineEvent, description: &'a str, } @@ -115,7 +106,6 @@ mod tests { let test_cases = vec![ TestCase { - log_level: LogLevel::Error, event: EngineEvent::Error( EngineError::new_unknown( EventDetails::new( @@ -141,8 +131,7 @@ mod tests { description: "Error event", }, TestCase { - log_level: LogLevel::Info, - event: EngineEvent::Deploying( + event: EngineEvent::Info( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -157,8 +146,7 @@ mod tests { description: "Deploying info event", }, TestCase { - log_level: LogLevel::Debug, - event: EngineEvent::Pausing( + event: EngineEvent::Debug( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -173,8 
+161,7 @@ mod tests { description: "Pausing application debug event", }, TestCase { - log_level: LogLevel::Warning, - event: EngineEvent::Pausing( + event: EngineEvent::Warning( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -194,15 +181,15 @@ mod tests { for tc in test_cases { // execute: - logger.log(tc.log_level.clone(), tc.event.clone()); + logger.log(tc.event.clone()); // validate: assert!( - logs_contain(match tc.log_level { - LogLevel::Debug => "DEBUG", - LogLevel::Info => "INFO", - LogLevel::Warning => "WARN", - LogLevel::Error => "ERROR", + logs_contain(match tc.event { + EngineEvent::Debug(_, _) => "DEBUG", + EngineEvent::Info(_, _) => "INFO", + EngineEvent::Warning(_, _) => "WARN", + EngineEvent::Error(_, _) => "ERROR", }), "{}", tc.description diff --git a/src/transaction.rs b/src/transaction.rs index 46696e28..e74d5470 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -11,7 +11,7 @@ use crate::container_registry::to_engine_error; use crate::engine::{EngineConfig, EngineConfigError}; use crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; +use crate::logger::Logger; use crate::models::{EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier}; pub struct Transaction<'a> { @@ -211,13 +211,8 @@ impl<'a> Transaction<'a> { ListenersHelper::new(self.engine.build_platform().listeners()).deployment_in_progress(progress_info); let event_details = build_event_details(); - self.logger.log( - match build_result.is_ok() { - true => LogLevel::Info, - false => LogLevel::Error, - }, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg)), - ); + self.logger + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg))); // Abort if it was an error let _ = build_result.map_err(|err| crate::build_platform::to_engine_error(event_details, err))?; @@ 
-331,15 +326,10 @@ impl<'a> Transaction<'a> { match self.build_and_push_applications(applications, &option) { Ok(apps) => apps, Err(engine_err) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - engine_err.clone(), - Some(EventMessage::new_from_safe( - "ROLLBACK STARTED! an error occurred".to_string(), - )), - ), - ); + self.logger.log(EngineEvent::Error( + engine_err.clone(), + Some(EventMessage::new_from_safe("ROLLBACK STARTED! an error occurred".to_string())), + )); return if engine_err.tag() == &Tag::TaskCancellationRequested { TransactionResult::Canceled From fa6e83424ddb619637ebb077d688a4577c70d742 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Sat, 26 Mar 2022 20:12:45 +0100 Subject: [PATCH 75/85] Fix docker init for tests (#667) * Fix docker init for tests --- src/cmd/docker.rs | 11 +++++++++++ test_utilities/src/utilities.rs | 6 ++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index fa435cd6..1d3d37d8 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -1,6 +1,8 @@ use crate::cmd::command::{CommandError, CommandKiller, QoveryCommand}; +use lazy_static::lazy_static; use std::path::Path; use std::process::ExitStatus; +use std::sync::Mutex; use url::Url; #[derive(thiserror::Error, Debug)] @@ -21,6 +23,13 @@ pub enum DockerError { Timeout(String), } +lazy_static! 
{ + // Docker login when launched in parallel can mess up ~/.docker/config.json + // We use a mutex that will force serialization of logins in order to avoid that + // Mostly use for CI/Test when all test start in parallel and it the login phase at the same time + static ref LOGIN_LOCK: Mutex<()> = Mutex::new(()); +} + #[derive(Debug)] pub struct ContainerImage { pub registry: Url, @@ -130,6 +139,8 @@ impl Docker { pub fn login(&self, registry: &Url) -> Result<(), DockerError> { info!("Docker login {} as user {}", registry, registry.username()); + + let _lock = LOGIN_LOCK.lock().unwrap(); let password = urlencoding::decode(registry.password().unwrap_or_default()) .unwrap_or_default() .to_string(); diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 58bf34f4..eb3798b5 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -54,6 +54,7 @@ use qovery_engine::logger::{Logger, StdIoLogger}; use qovery_engine::models::DatabaseMode::MANAGED; use qovery_engine::runtime::block_on; use time::Instant; +use url::Url; pub fn context(organization_id: &str, cluster_id: &str) -> Context { let organization_id = organization_id.to_string(); @@ -61,7 +62,8 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { let execution_id = execution_id(); let home_dir = std::env::var("WORKSPACE_ROOT_DIR").unwrap_or(home_dir().unwrap().to_str().unwrap().to_string()); let lib_root_dir = std::env::var("LIB_ROOT_DIR").expect("LIB_ROOT_DIR is mandatory"); - let docker = Docker::new(None).expect("Can't init docker"); + let docker_host = std::env::var("DOCKER_HOST").map(|x| Url::parse(&x).unwrap()).ok(); + let docker = Docker::new(docker_host.clone()).expect("Can't init docker"); let metadata = Metadata { dry_run_deploy: Option::from({ @@ -98,7 +100,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { home_dir, lib_root_dir, true, - None, + docker_host, enabled_features, Option::from(metadata), docker, 
From 3fd7bd8a4d0061cbb8baab0efffd699984108c8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Sat, 26 Mar 2022 22:21:04 +0100 Subject: [PATCH 76/85] Add more logs --- src/container_registry/ecr.rs | 29 ++++++++++++++++++++++++++++- src/container_registry/mod.rs | 19 +++++++++++++++++-- test_utilities/src/aws.rs | 5 +++-- 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 0db19cea..41f54084 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -13,7 +13,11 @@ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient}; use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::events::{EngineEvent, EventMessage, GeneralStep, Stage}; +use crate::logger::Logger; +use crate::models::{ + Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, +}; use crate::runtime::block_on; use retry::delay::Fixed; use retry::Error::Operation; @@ -30,6 +34,7 @@ pub struct ECR { region: Region, registry_info: Option, listeners: Listeners, + logger: Box, } impl ECR { @@ -40,6 +45,7 @@ impl ECR { access_key_id: &str, secret_access_key: &str, region: &str, + logger: Box, ) -> Result { let mut cr = ECR { context, @@ -50,6 +56,7 @@ impl ECR { region: Region::from_str(region).unwrap(), registry_info: None, listeners: vec![], + logger, }; let credentials = cr.get_credentials()?; @@ -57,6 +64,7 @@ impl ECR { let _ = registry_url.set_username(&credentials.access_token); let _ = registry_url.set_password(Some(&credentials.password)); + cr.log_info(format!("🔓 Login to ECR registry {}", credentials.endpoint_url)); let _ = cr .context .docker @@ -76,6 +84,23 @@ impl ECR { Ok(cr) } + pub fn log_info(&self, msg: String) { + 
self.logger.log(EngineEvent::Info( + self.get_event_details(Stage::General(GeneralStep::ValidateSystemRequirements)), + EventMessage::new_from_safe(msg.clone()), + )); + + let lh = ListenersHelper::new(&self.listeners); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Environment { + id: self.context.execution_id().to_string(), + }, + ProgressLevel::Info, + Some(msg), + self.context.execution_id(), + )); + } + pub fn credentials(&self) -> StaticProvider { StaticProvider::new(self.access_key_id.to_string(), self.secret_access_key.to_string(), None, None) } @@ -225,6 +250,8 @@ impl ECR { } fn get_or_create_repository(&self, repository_name: &str) -> Result { + self.log_info(format!("🗂️ Provisioning container repository {}", repository_name)); + // check if the repository already exists let repository = self.get_repository(repository_name); if let Some(repo) = repository { diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 762b38c8..7f1bd271 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -4,8 +4,8 @@ use url::Url; use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::errors::EngineError; -use crate::events::EventDetails; -use crate::models::{Context, Listen}; +use crate::events::{EventDetails, Stage, Transmitter}; +use crate::models::{Context, Listen, QoveryIdentifier}; pub mod docr; pub mod ecr; @@ -36,6 +36,21 @@ pub trait ContainerRegistry: Listen { // Check on the registry if a specific image already exist fn does_image_exists(&self, image: &Image) -> bool; + + fn get_event_details(&self, stage: Stage) -> EventDetails { + let context = self.context(); + let ev = EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + 
Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()), + ); + + ev + } } pub fn to_engine_error(event_details: EventDetails, err: ContainerRegistryError) -> EngineError { diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 41f59b17..b2d72010 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -32,7 +32,7 @@ pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; -pub fn container_registry_ecr(context: &Context) -> ECR { +pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR { let secrets = FuncTestsSecrets::new(); if secrets.AWS_ACCESS_KEY_ID.is_none() || secrets.AWS_SECRET_ACCESS_KEY.is_none() @@ -49,6 +49,7 @@ pub fn container_registry_ecr(context: &Context) -> ECR { secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(), secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(), secrets.AWS_DEFAULT_REGION.unwrap().as_str(), + logger, ) .unwrap() } @@ -73,7 +74,7 @@ impl Cluster for AWS { vpc_network_mode: Option, ) -> EngineConfig { // use ECR - let container_registry = Box::new(container_registry_ecr(context)); + let container_registry = Box::new(container_registry_ecr(context, logger.clone())); // use LocalDocker let build_platform = Box::new(build_platform_local_docker(context, logger.clone())); From 24054a273cf166ae6f20b3174c6a43e9051a2eda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Sat, 26 Mar 2022 23:07:36 +0100 Subject: [PATCH 77/85] Add logs --- src/container_registry/docr.rs | 10 ++++++++-- src/container_registry/ecr.rs | 3 ++- src/container_registry/scaleway_container_registry.rs | 3 ++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index 922bda17..f0134373 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -28,7 +28,13 @@ pub struct DOCR { 
} impl DOCR { - pub fn new(context: Context, id: &str, name: &str, api_key: &str) -> Result { + pub fn new( + context: Context, + id: &str, + name: &str, + api_key: &str, + listener: Listener, + ) -> Result { let registry_name = name.to_string(); let registry_name2 = name.to_string(); let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); @@ -48,7 +54,7 @@ impl DOCR { name: name.to_string(), api_key: api_key.into(), id: id.into(), - listeners: vec![], + listeners: vec![listener], registry_info, }; diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 41f54084..ec9ef8ae 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -45,6 +45,7 @@ impl ECR { access_key_id: &str, secret_access_key: &str, region: &str, + listener: Listener, logger: Box, ) -> Result { let mut cr = ECR { @@ -55,7 +56,7 @@ impl ECR { secret_access_key: secret_access_key.to_string(), region: Region::from_str(region).unwrap(), registry_info: None, - listeners: vec![], + listeners: vec![listener], logger, }; diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index fd3849ca..7fab0e68 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -30,6 +30,7 @@ impl ScalewayCR { secret_token: &str, default_project_id: &str, zone: ScwZone, + listener: Listener, ) -> Result { // Be sure we are logged on the registry let login = "nologin".to_string(); @@ -63,7 +64,7 @@ impl ScalewayCR { secret_token, zone, registry_info, - listeners: Vec::new(), + listeners: vec![listener], }; Ok(cr) From 4bba90885359df245582fa814100c2e9a0251c7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Sat, 26 Mar 2022 23:21:17 +0100 Subject: [PATCH 78/85] Add logs --- src/models.rs | 15 +++++++++++++++ test_utilities/src/aws.rs | 3 ++- test_utilities/src/digitalocean.rs | 12 
++++++++++-- test_utilities/src/scaleway.rs | 4 +++- 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/models.rs b/src/models.rs index c2126e5d..e2b0bb5b 100644 --- a/src/models.rs +++ b/src/models.rs @@ -977,6 +977,21 @@ pub trait ProgressListener: Send + Sync { fn delete_error(&self, info: ProgressInfo); } +pub struct NoOpProgressListener {} + +impl ProgressListener for NoOpProgressListener { + fn deployment_in_progress(&self, _info: ProgressInfo) {} + fn pause_in_progress(&self, _info: ProgressInfo) {} + fn delete_in_progress(&self, _info: ProgressInfo) {} + fn error(&self, _info: ProgressInfo) {} + fn deployed(&self, _info: ProgressInfo) {} + fn paused(&self, _info: ProgressInfo) {} + fn deleted(&self, _info: ProgressInfo) {} + fn deployment_error(&self, _info: ProgressInfo) {} + fn pause_error(&self, _info: ProgressInfo) {} + fn delete_error(&self, _info: ProgressInfo) {} +} + pub trait Listen { fn listeners(&self) -> &Listeners; fn add_listener(&mut self, listener: Listener); diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index b2d72010..44d41a7d 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -13,7 +13,7 @@ use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; use qovery_engine::logger::Logger; -use qovery_engine::models::Context; +use qovery_engine::models::{Context, NoOpProgressListener}; use std::str::FromStr; use std::sync::Arc; use tracing::error; @@ -49,6 +49,7 @@ pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(), secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(), secrets.AWS_DEFAULT_REGION.unwrap().as_str(), + Arc::new(Box::new(NoOpProgressListener {})), logger, ) .unwrap() diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 15eb7f30..4192c634 100644 --- a/test_utilities/src/digitalocean.rs +++ 
b/test_utilities/src/digitalocean.rs @@ -7,7 +7,7 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest}; +use qovery_engine::models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; @@ -33,7 +33,14 @@ pub const DO_SELF_HOSTED_DATABASE_DISK_TYPE: &str = "do-block-storage"; pub fn container_registry_digital_ocean(context: &Context) -> DOCR { let secrets = FuncTestsSecrets::new(); - DOCR::new(context.clone(), DOCR_ID, DOCR_ID, secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str()).unwrap() + DOCR::new( + context.clone(), + DOCR_ID, + DOCR_ID, + secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap() } pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { @@ -168,6 +175,7 @@ pub fn clean_environments( .DIGITAL_OCEAN_TOKEN .as_ref() .expect("DIGITAL_OCEAN_TOKEN is not set in secrets"), + Arc::new(Box::new(NoOpProgressListener {})), ); // FIXME: re-enable it, or let pleco do its job ? 
diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 1941a90b..ae0dfc1b 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -6,7 +6,7 @@ use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest}; +use qovery_engine::models::{Context, EnvironmentRequest, Listener, NoOpProgressListener}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; @@ -59,6 +59,7 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { scw_secret_key.as_str(), scw_default_project_id.as_str(), SCW_TEST_ZONE, + Arc::new(Box::new(NoOpProgressListener {})), ) .unwrap() } @@ -236,6 +237,7 @@ pub fn clean_environments( secret_token.as_str(), project_id.as_str(), zone, + Arc::new(Box::new(NoOpProgressListener {})), )?; // delete images created in registry From 47f4b43e5215405cb7948b01c975f4a341dabb7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Sat, 26 Mar 2022 23:29:12 +0100 Subject: [PATCH 79/85] fmt --- test_utilities/src/scaleway.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index ae0dfc1b..fa915f9c 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -6,7 +6,7 @@ use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest, Listener, NoOpProgressListener}; +use qovery_engine::models::{Context, EnvironmentRequest, 
NoOpProgressListener}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; From 9d7d98c615985fbfc4accf49138dd7d3857e4e29 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 10:36:46 +0200 Subject: [PATCH 80/85] FIX CI --- tests/scaleway/scw_container_registry.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index aef046c2..7aae7645 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -3,6 +3,8 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; +use qovery_engine::models::NoOpProgressListener; +use std::sync::Arc; use tracing::debug; use uuid::Uuid; @@ -44,6 +46,7 @@ fn test_get_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + Arc::new(Box::new(NoOpProgressListener {})), ) .unwrap(); @@ -90,6 +93,7 @@ fn test_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + Arc::new(Box::new(NoOpProgressListener {})), ) .unwrap(); @@ -133,6 +137,7 @@ fn test_delete_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + Arc::new(Box::new(NoOpProgressListener {})), ) .unwrap(); @@ -170,6 +175,7 @@ fn test_get_or_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, + Arc::new(Box::new(NoOpProgressListener {})), ) .unwrap(); From 77815b627564860cc80fc4a4a123f46ad10e6b3e Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 10:45:43 +0200 Subject: [PATCH 81/85] Fix log message --- src/build_platform/local_docker.rs | 5 +++-- src/transaction.rs | 5 ----- 2 files changed, 3 insertions(+), 7 
deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 8f0bf51a..48490651 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -362,8 +362,9 @@ impl BuildPlatform for LocalDocker { // LOGGING let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); let msg = format!( - "📥 Cloning repository: {} to {:?}", - build.git_repository.url, repository_root_path + "📥 Cloning repository: {} to {}", + build.git_repository.url, + repository_root_path.to_string_lossy() ); listeners_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Application { id: app_id.clone() }, diff --git a/src/transaction.rs b/src/transaction.rs index e74d5470..76c4c012 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -489,11 +489,6 @@ impl<'a> Transaction<'a> { }; } - // 100 ms sleep to avoid race condition on last service status update - // Otherwise, the last status sent to the CORE is (sometimes) not the right one. 
- // Even by storing data at the micro seconds precision - thread::sleep(std::time::Duration::from_millis(100)); - let _ = match action_fn(environment) { Err(err) => { let rollback_result = match self.rollback() { From e02b48bd70e401d448cd289b0b864e93104b84b7 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 11:02:39 +0200 Subject: [PATCH 82/85] fix import --- src/transaction.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transaction.rs b/src/transaction.rs index 76c4c012..10eb6ee8 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -2,7 +2,6 @@ use crate::build_platform::BuildError; use crate::cloud_provider::environment::Environment; use std::cell::RefCell; use std::rc::Rc; -use std::thread; use crate::cloud_provider::kubernetes::Kubernetes; use crate::cloud_provider::service::{Action, Application, Service}; From 68fcbc73921b632061d374ee90285f290a6f1c1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Mon, 28 Mar 2022 16:56:18 +0200 Subject: [PATCH 83/85] Refacto application to avoid duplication (#669) --- src/build_platform/local_docker.rs | 4 +- src/build_platform/mod.rs | 2 +- src/cloud_provider/aws/databases/mongodb.rs | 4 +- src/cloud_provider/aws/databases/mysql.rs | 4 +- .../aws/databases/postgresql.rs | 4 +- src/cloud_provider/aws/databases/redis.rs | 4 +- src/cloud_provider/aws/databases/utilities.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 4 +- src/cloud_provider/aws/mod.rs | 3 +- src/cloud_provider/aws/regions.rs | 2 +- src/cloud_provider/aws/router.rs | 2 +- .../digitalocean/application.rs | 504 --------------- .../digitalocean/databases/mongodb.rs | 4 +- .../digitalocean/databases/mysql.rs | 4 +- .../digitalocean/databases/postgresql.rs | 4 +- .../digitalocean/databases/redis.rs | 4 +- .../digitalocean/kubernetes/mod.rs | 6 +- src/cloud_provider/digitalocean/mod.rs | 3 +- .../digitalocean/network/vpc.rs | 4 +- src/cloud_provider/digitalocean/router.rs | 2 +- 
src/cloud_provider/environment.rs | 7 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/mod.rs | 2 +- src/cloud_provider/scaleway/application.rs | 599 ------------------ .../scaleway/databases/mongodb.rs | 4 +- .../scaleway/databases/mysql.rs | 4 +- .../scaleway/databases/postgresql.rs | 4 +- .../scaleway/databases/redis.rs | 4 +- .../scaleway/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 6 +- src/cloud_provider/scaleway/mod.rs | 3 +- src/cloud_provider/scaleway/router.rs | 2 +- src/cloud_provider/service.rs | 15 +- src/cloud_provider/utilities.rs | 2 +- src/container_registry/docr.rs | 2 +- src/container_registry/ecr.rs | 4 +- src/container_registry/mod.rs | 2 +- .../scaleway_container_registry.rs | 5 +- src/dns_provider/cloudflare.rs | 2 +- src/dns_provider/mod.rs | 2 +- src/engine.rs | 2 +- src/errors/mod.rs | 2 +- src/events/mod.rs | 2 +- src/{models.rs => io_models.rs} | 157 ++--- src/lib.rs | 1 + src/logger.rs | 4 +- .../aws => models}/application.rs | 528 ++++++++------- src/models/aws/application.rs | 90 +++ src/models/aws/mod.rs | 43 ++ src/models/digital_ocean/application.rs | 91 +++ src/models/digital_ocean/mod.rs | 126 ++++ src/models/mod.rs | 5 + src/models/scaleway/application.rs | 103 +++ src/models/scaleway/mod.rs | 207 ++++++ src/models/types.rs | 19 + src/object_storage/mod.rs | 2 +- src/object_storage/s3.rs | 2 +- src/object_storage/scaleway_object_storage.rs | 4 +- src/object_storage/spaces.rs | 4 +- src/transaction.rs | 9 +- test_utilities/src/aws.rs | 2 +- test_utilities/src/cloudflare.rs | 2 +- test_utilities/src/common.rs | 10 +- test_utilities/src/digitalocean.rs | 4 +- test_utilities/src/scaleway.rs | 4 +- test_utilities/src/utilities.rs | 8 +- tests/aws/aws_databases.rs | 6 +- tests/aws/aws_environment.rs | 14 +- tests/digitalocean/do_databases.rs | 6 +- tests/digitalocean/do_environment.rs | 12 +- tests/digitalocean/do_kubernetes.rs | 2 +- tests/digitalocean/do_spaces.rs | 2 +- 
tests/digitalocean/do_whole_enchilada.rs | 2 +- tests/scaleway/scw_container_registry.rs | 4 +- tests/scaleway/scw_databases.rs | 6 +- tests/scaleway/scw_environment.rs | 12 +- tests/scaleway/scw_kubernetes.rs | 2 +- tests/scaleway/scw_whole_enchilada.rs | 2 +- 78 files changed, 1191 insertions(+), 1561 deletions(-) delete mode 100644 src/cloud_provider/digitalocean/application.rs delete mode 100644 src/cloud_provider/scaleway/application.rs rename src/{models.rs => io_models.rs} (92%) rename src/{cloud_provider/aws => models}/application.rs (54%) create mode 100644 src/models/aws/application.rs create mode 100644 src/models/aws/mod.rs create mode 100644 src/models/digital_ocean/application.rs create mode 100644 src/models/digital_ocean/mod.rs create mode 100644 src/models/mod.rs create mode 100644 src/models/scaleway/application.rs create mode 100644 src/models/scaleway/mod.rs create mode 100644 src/models/types.rs diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 48490651..9728a235 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -17,10 +17,10 @@ use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; const BUILD_DURATION_TIMEOUT_SEC: u64 = 30 * 60; diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 5804afa6..55a57eb0 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -5,8 +5,8 @@ use crate::cmd::command::CommandError; use crate::cmd::docker::DockerError; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; +use 
crate::io_models::{Context, Listen, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::{Context, Listen, QoveryIdentifier}; use crate::utilities::compute_image_tag; use std::fmt::{Display, Formatter, Result as FmtResult}; use std::hash::Hash; diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 230bc3d8..386a0b88 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -16,9 +16,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDbAws { diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs index d645aa6d..9dd5622f 100644 --- a/src/cloud_provider/aws/databases/mysql.rs +++ b/src/cloud_provider/aws/databases/mysql.rs @@ -17,9 +17,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, DatabaseKind, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, DatabaseKind, Listen, Listener, Listeners}; use ::function_name::named; pub struct MySQLAws { diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs index 07ec3678..0e881b2c 100644 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ b/src/cloud_provider/aws/databases/postgresql.rs @@ 
-17,9 +17,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct PostgreSQLAws { diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs index 9c90501b..3c7424ba 100644 --- a/src/cloud_provider/aws/databases/redis.rs +++ b/src/cloud_provider/aws/databases/redis.rs @@ -14,9 +14,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisAws { diff --git a/src/cloud_provider/aws/databases/utilities.rs b/src/cloud_provider/aws/databases/utilities.rs index 4b3b15e9..af52a341 100644 --- a/src/cloud_provider/aws/databases/utilities.rs +++ b/src/cloud_provider/aws/databases/utilities.rs @@ -1,6 +1,6 @@ use crate::cloud_provider::utilities::VersionsNumber; use crate::errors::CommandError; -use crate::models::DatabaseKind; +use crate::io_models::DatabaseKind; pub fn get_parameter_group_from_version( version: VersionsNumber, @@ -28,7 +28,7 @@ pub fn aws_final_snapshot_name(database_name: &str) -> String { mod tests_aws_databases_parameters { use crate::cloud_provider::aws::databases::utilities::get_parameter_group_from_version; use crate::cloud_provider::utilities::VersionsNumber; - use 
crate::models::DatabaseKind; + use crate::io_models::DatabaseKind; use std::str::FromStr; #[test] diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index c79927fb..09e93e87 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -38,11 +38,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString, }; +use crate::logger::Logger; use crate::object_storage::s3::S3; use crate::object_storage::ObjectStorage; use crate::string::terraform_list_format; diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index 3323de5e..7dba78d6 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -9,10 +9,9 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY}; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; use crate::runtime::block_on; -pub mod application; pub mod databases; pub mod kubernetes; pub mod regions; diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index 3ede17b0..5a719c07 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -1,6 +1,6 @@ use crate::cloud_provider::aws::regions::AwsZones::*; use crate::cloud_provider::aws::regions::RegionAndZoneErrors::*; -use 
crate::models::ToTerraformString; +use crate::io_models::ToTerraformString; use serde::{Deserialize, Serialize}; use std::fmt; use std::fmt::{Display, Formatter}; diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index 940cee44..769180f4 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::{to_engine_error, Timeout}; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterAws { diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs deleted file mode 100644 index c6d2784b..00000000 --- a/src/cloud_provider/digitalocean/application.rs +++ /dev/null @@ -1,504 +0,0 @@ -use tera::Context as TeraContext; - -use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; -use crate::logger::Logger; -use crate::models::{Context, 
Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; -use std::fmt; -use std::str::FromStr; - -pub struct ApplicationDo { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl ApplicationDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - ApplicationDo { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - build, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl Helm for ApplicationDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name, self.id), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/digitalocean/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for ApplicationDo { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl Application for ApplicationDo { - fn 
get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl ToTransmitter for ApplicationDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for ApplicationDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( 
- &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - - // This is specific to digital ocean as it is them that create the registry secret - // we don't have the hand on it - context.insert("registry_secret", "do-container-registry-secret-for-cluster"); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::Standard => "do-block-storage", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> 
Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for ApplicationDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for ApplicationDo { - #[named] - fn on_delete(&self, 
target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } -} - -impl Listen for ApplicationDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - Standard, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum DoRegion { - NewYorkCity1, - NewYorkCity2, - NewYorkCity3, - Amsterdam2, - Amsterdam3, - SanFrancisco1, - SanFrancisco2, - SanFrancisco3, - Singapore, - London, - Frankfurt, - Toronto, - Bangalore, -} - -impl DoRegion { - pub fn as_str(&self) -> &str { - match self { - DoRegion::NewYorkCity1 => "nyc1", - DoRegion::NewYorkCity2 => "nyc2", - DoRegion::NewYorkCity3 => "nyc3", - DoRegion::Amsterdam2 => "ams2", - DoRegion::Amsterdam3 => "ams3", - DoRegion::SanFrancisco1 => "sfo1", - DoRegion::SanFrancisco2 => "sfo2", - DoRegion::SanFrancisco3 => "sfo3", - DoRegion::Singapore => "sgp1", - DoRegion::London => "lon1", - 
DoRegion::Frankfurt => "fra1", - DoRegion::Toronto => "tor1", - DoRegion::Bangalore => "blr1", - } - } -} - -impl fmt::Display for DoRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - DoRegion::NewYorkCity1 => write!(f, "nyc1"), - DoRegion::NewYorkCity2 => write!(f, "nyc2"), - DoRegion::NewYorkCity3 => write!(f, "nyc3"), - DoRegion::Amsterdam2 => write!(f, "ams2"), - DoRegion::Amsterdam3 => write!(f, "ams3"), - DoRegion::SanFrancisco1 => write!(f, "sfo1"), - DoRegion::SanFrancisco2 => write!(f, "sfo2"), - DoRegion::SanFrancisco3 => write!(f, "sfo3"), - DoRegion::Singapore => write!(f, "sgp1"), - DoRegion::London => write!(f, "lon1"), - DoRegion::Frankfurt => write!(f, "fra1"), - DoRegion::Toronto => write!(f, "tor1"), - DoRegion::Bangalore => write!(f, "blr1"), - } - } -} - -impl FromStr for DoRegion { - type Err = CommandError; - - fn from_str(s: &str) -> Result { - match s { - "nyc1" => Ok(DoRegion::NewYorkCity1), - "nyc2" => Ok(DoRegion::NewYorkCity2), - "nyc3" => Ok(DoRegion::NewYorkCity3), - "ams2" => Ok(DoRegion::Amsterdam2), - "ams3" => Ok(DoRegion::Amsterdam3), - "sfo1" => Ok(DoRegion::SanFrancisco1), - "sfo2" => Ok(DoRegion::SanFrancisco2), - "sfo3" => Ok(DoRegion::SanFrancisco3), - "sgp1" => Ok(DoRegion::Singapore), - "lon1" => Ok(DoRegion::London), - "fra1" => Ok(DoRegion::Frankfurt), - "tor1" => Ok(DoRegion::Toronto), - "blr1" => Ok(DoRegion::Bangalore), - _ => { - return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); - } - } - } -} diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index 61c68859..1c179c2f 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, 
Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDo { diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index b6d19aad..5bffb434 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MySQLDo { diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index db1837d1..2b47a106 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct PostgresDo { diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 0ae77e6f..a06684d9 100644 --- 
a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisDo { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 55fa89e2..8d58135d 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -6,7 +6,6 @@ use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; use crate::cloud_provider::aws::regions::AwsZones; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::kubernetes::doks_api::{ get_do_kubeconfig_by_cluster_name, get_do_latest_doks_slug_from_api, get_doks_info_from_name, @@ -40,11 +39,12 @@ use crate::events::Stage::Infrastructure; use crate::events::{ EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, }; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, ToHelmString, }; +use crate::logger::Logger; +use crate::models::digital_ocean::DoRegion; use crate::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; diff --git a/src/cloud_provider/digitalocean/mod.rs 
b/src/cloud_provider/digitalocean/mod.rs index 50ca7dc2..debccdf3 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -9,9 +9,8 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::DIGITAL_OCEAN_TOKEN; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod do_api_common; pub mod kubernetes; diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 563b9e26..76225984 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::models::vpc::{Vpc, Vpcs}; use crate::errors::CommandError; +use crate::models::digital_ocean::DoRegion; #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] #[serde(rename_all = "snake_case")] @@ -169,11 +169,11 @@ fn is_do_reserved_vpc_subnets(region: DoRegion, subnet: &str) -> bool { #[cfg(test)] mod tests_do_vpcs { - use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::network::vpc::{ do_get_vpcs_from_api_output, get_do_vpc_from_name, get_do_vpc_from_subnet, get_random_available_subnet, is_do_reserved_vpc_subnets, VpcInitKind, }; + use crate::models::digital_ocean::DoRegion; fn do_get_vpc_json() -> String { // https://developers.digitalocean.com/documentation/v2/#retrieve-an-existing-load-balancer diff --git a/src/cloud_provider/digitalocean/router.rs 
b/src/cloud_provider/digitalocean/router.rs index cd9662cc..1bc93804 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterDo { diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 624532a7..916e8759 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,4 +1,5 @@ -use crate::cloud_provider::service::{Action, Application, Database, Router, StatefulService, StatelessService}; +use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; +use crate::models::application::IApplication; use crate::unit_conversion::cpu_string_to_float; pub struct Environment { @@ -8,7 +9,7 @@ pub struct Environment { pub owner_id: String, pub organization_id: String, pub action: Action, - pub applications: Vec>, + pub applications: Vec>, pub routers: Vec>, pub databases: Vec>, } @@ -20,7 +21,7 @@ impl Environment { owner_id: &str, organization_id: &str, action: Action, - applications: Vec>, + applications: Vec>, routers: Vec>, databases: Vec>, ) -> Self { diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 86d2f166..e1e90163 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -31,11 +31,11 @@ use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter}; use crate::fs::workspace_directory; -use crate::logger::Logger; -use 
crate::models::ProgressLevel::Info; -use crate::models::{ +use crate::io_models::ProgressLevel::Info; +use crate::io_models::{ Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, }; +use crate::logger::Logger; use crate::object_storage::ObjectStorage; use crate::unit_conversion::{any_to_mi, cpu_string_to_float}; @@ -1434,8 +1434,8 @@ mod tests { use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd::structs::{KubernetesList, KubernetesNode, KubernetesVersion}; use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter}; + use crate::io_models::{ListenersHelper, QoveryIdentifier}; use crate::logger::StdIoLogger; - use crate::models::{ListenersHelper, QoveryIdentifier}; #[test] pub fn check_kubernetes_upgrade_method() { diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index eb9a412a..650b1d09 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -7,7 +7,7 @@ use crate::cloud_provider::environment::Environment; use crate::cloud_provider::kubernetes::Kubernetes; use crate::errors::EngineError; use crate::events::{EventDetails, Stage, ToTransmitter}; -use crate::models::{Context, Listen}; +use crate::io_models::{Context, Listen}; pub mod aws; pub mod digitalocean; diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs deleted file mode 100644 index 52a87c0f..00000000 --- a/src/cloud_provider/scaleway/application.rs +++ /dev/null @@ -1,599 +0,0 @@ -use std::fmt; -use std::str::FromStr; - -use tera::Context as TeraContext; - -use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, 
deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; -use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; - -pub struct ApplicationScw { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl ApplicationScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - ApplicationScw { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - build, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl Helm for ApplicationScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> 
String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/scaleway/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for ApplicationScw { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl Application for ApplicationScw { - fn get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl ToTransmitter for ApplicationScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for ApplicationScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: 
&DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - // TODO(benjaminch): Switch to proper storage class - // Note: Seems volume storage type are not supported, only blocked storage for the time being - // https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass - StorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", - StorageType::LocalSsd => "l_ssd", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - 
snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - // container registry credentials - context.insert( - "container_registry_docker_json_config", - self.build - .image - .clone() - .registry_docker_json_config - .unwrap_or_default() - .as_str(), - ); - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for ApplicationScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) 
-> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for ApplicationScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } -} - -impl Listen 
for ApplicationScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] -pub enum StorageType { - #[serde(rename = "b_ssd")] - BlockSsd, - #[serde(rename = "l_ssd")] - LocalSsd, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwRegion { - Paris, - Amsterdam, - Warsaw, -} - -impl ScwRegion { - // TODO(benjaminch): improve / refactor this! - pub fn as_str(&self) -> &str { - match self { - ScwRegion::Paris => "fr-par", - ScwRegion::Amsterdam => "nl-ams", - ScwRegion::Warsaw => "pl-waw", - } - } -} - -impl fmt::Display for ScwRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwRegion::Paris => write!(f, "fr-par"), - ScwRegion::Amsterdam => write!(f, "nl-ams"), - ScwRegion::Warsaw => write!(f, "pl-waw"), - } - } -} - -impl FromStr for ScwRegion { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "fr-par" => Ok(ScwRegion::Paris), - "nl-ams" => Ok(ScwRegion::Amsterdam), - "pl-waw" => Ok(ScwRegion::Warsaw), - _ => Err(()), - } - } -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwZone { - Paris1, - Paris2, - Paris3, - Amsterdam1, - Warsaw1, -} - -impl ScwZone { - // TODO(benjaminch): improve / refactor this! - pub fn as_str(&self) -> &str { - match self { - ScwZone::Paris1 => "fr-par-1", - ScwZone::Paris2 => "fr-par-2", - ScwZone::Paris3 => "fr-par-3", - ScwZone::Amsterdam1 => "nl-ams-1", - ScwZone::Warsaw1 => "pl-waw-1", - } - } - - pub fn region(&self) -> ScwRegion { - match self { - ScwZone::Paris1 => ScwRegion::Paris, - ScwZone::Paris2 => ScwRegion::Paris, - ScwZone::Paris3 => ScwRegion::Paris, - ScwZone::Amsterdam1 => ScwRegion::Amsterdam, - ScwZone::Warsaw1 => ScwRegion::Warsaw, - } - } - - // TODO(benjaminch): improve / refactor this! 
- pub fn region_str(&self) -> String { - match self { - ScwZone::Paris1 => "fr-par", - ScwZone::Paris2 => "fr-par", - ScwZone::Paris3 => "fr-par", - ScwZone::Amsterdam1 => "nl-ams", - ScwZone::Warsaw1 => "pl-waw", - } - .to_string() - } -} - -impl fmt::Display for ScwZone { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwZone::Paris1 => write!(f, "fr-par-1"), - ScwZone::Paris2 => write!(f, "fr-par-2"), - ScwZone::Paris3 => write!(f, "fr-par-3"), - ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), - ScwZone::Warsaw1 => write!(f, "pl-waw-1"), - } - } -} - -impl FromStr for ScwZone { - type Err = CommandError; - - fn from_str(s: &str) -> Result { - match s { - "fr-par-1" => Ok(ScwZone::Paris1), - "fr-par-2" => Ok(ScwZone::Paris2), - "fr-par-3" => Ok(ScwZone::Paris3), - "nl-ams-1" => Ok(ScwZone::Amsterdam1), - "pl-waw-1" => Ok(ScwZone::Warsaw1), - _ => { - return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{ScwRegion, ScwZone}; - use std::str::FromStr; - - #[test] - fn test_region_to_str() { - assert_eq!("fr-par", ScwRegion::Paris.as_str()); - assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); - assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); - } - - #[test] - fn test_region_from_str() { - assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); - assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); - assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); - } - - #[test] - fn test_zone_to_str() { - assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); - assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); - assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); - assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); - assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); - } - - #[test] - fn test_zone_from_str() { - assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); - assert_eq!(ScwZone::from_str("fr-par-2"), 
Ok(ScwZone::Paris2)); - assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); - assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); - assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); - } - - #[test] - fn test_zone_region() { - assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); - assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); - } -} diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index 160094a0..f1b39561 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDbScw { diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index f6ce64a1..6d33eb8f 100644 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -13,9 +13,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, 
Listeners}; use ::function_name::named; use std::collections::HashMap; diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index d64cbf27..d101ecbc 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -13,9 +13,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; use std::collections::HashMap; diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 6c7bc8a8..528152da 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisScw { diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 089322c6..0e5e6469 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -3,9 +3,9 @@ use crate::cloud_provider::helm::{ CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, 
ShellAgentContext, }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; -use crate::cloud_provider::scaleway::application::{ScwRegion, ScwZone}; use crate::cloud_provider::scaleway::kubernetes::KapsuleOptions; use crate::errors::CommandError; +use crate::models::scaleway::{ScwRegion, ScwZone}; use semver::Version; use serde::{Deserialize, Serialize}; use std::fs::File; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 960c0bce..4b3e8d78 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -10,7 +10,6 @@ use crate::cloud_provider::kubernetes::{ }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::scaleway::application::ScwZone; use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites}; use crate::cloud_provider::scaleway::kubernetes::node::{ScwInstancesType, ScwNodeGroup}; use crate::cloud_provider::utilities::print_action; @@ -23,10 +22,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, }; +use crate::logger::Logger; +use crate::models::scaleway::ScwZone; use crate::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; diff --git a/src/cloud_provider/scaleway/mod.rs b/src/cloud_provider/scaleway/mod.rs index fa03ff8c..ceaf8c3c 100644 --- 
a/src/cloud_provider/scaleway/mod.rs +++ b/src/cloud_provider/scaleway/mod.rs @@ -4,9 +4,8 @@ use uuid::Uuid; use crate::cloud_provider::{CloudProvider, EngineError, Kind, TerraformStateCredentials}; use crate::constants::{SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY}; use crate::events::{EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod kubernetes; pub mod router; diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index ee6a6e40..f7c3cb95 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterScw { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 216e69e1..b3111f51 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -5,7 +5,6 @@ use std::sync::mpsc::TryRecvError; use std::thread; use std::time::Duration; -use crate::build_platform::Build; use tera::Context as TeraContext; use crate::cloud_provider::environment::Environment; @@ -21,12 +20,12 @@ use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replica use crate::cmd::structs::LabelsContent; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; -use crate::logger::Logger; -use crate::models::ProgressLevel::Info; -use crate::models::{ +use 
crate::io_models::ProgressLevel::Info; +use crate::io_models::{ Context, DatabaseMode, Listen, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; +use crate::logger::Logger; pub trait Service: ToTransmitter { fn context(&self) -> &Context; @@ -157,10 +156,6 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait Application: StatelessService { - fn get_build(&self) -> &Build; - fn get_build_mut(&mut self) -> &mut Build; -} pub trait Router: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; @@ -348,8 +343,8 @@ pub fn default_tera_context( context.insert("max_instances", &service.max_instances()); context.insert("is_private_port", &service.private_port().is_some()); - if service.private_port().is_some() { - context.insert("private_port", &service.private_port().unwrap()); + if let Some(private_port) = service.private_port() { + context.insert("private_port", &private_port); } context.insert("version", &service.version()); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 28a20894..4c51ac0c 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -4,8 +4,8 @@ use std::collections::HashMap; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventDetails, EventMessage}; +use crate::io_models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use crate::logger::Logger; -use crate::models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use chrono::Duration; use core::option::Option::{None, Some}; use core::result::Result; diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index f0134373..a90c1e73 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -7,7 +7,7 @@ use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; use 
crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::utilities; use url::Url; diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index ec9ef8ae..83363bdb 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -14,10 +14,10 @@ use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::events::{EngineEvent, EventMessage, GeneralStep, Stage}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; use crate::runtime::block_on; use retry::delay::Fixed; use retry::Error::Operation; diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 7f1bd271..28f10a5b 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -5,7 +5,7 @@ use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::errors::EngineError; use crate::events::{EventDetails, Stage, Transmitter}; -use crate::models::{Context, Listen, QoveryIdentifier}; +use crate::io_models::{Context, Listen, QoveryIdentifier}; pub mod docr; pub mod ecr; diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 7fab0e68..3382b07b 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -1,13 +1,12 @@ extern crate scaleway_api_rs; -use crate::cloud_provider::scaleway::application::ScwZone; - use 
self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; use crate::cmd::docker; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use crate::models::scaleway::ScwZone; use crate::runtime::block_on; use url::Url; diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 48e8939c..2ed36465 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -2,7 +2,7 @@ use std::net::Ipv4Addr; use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::{DnsProvider, Kind}; -use crate::models::{Context, Domain}; +use crate::io_models::{Context, Domain}; pub struct Cloudflare { context: Context, diff --git a/src/dns_provider/mod.rs b/src/dns_provider/mod.rs index ce52fa81..bba32a2d 100644 --- a/src/dns_provider/mod.rs +++ b/src/dns_provider/mod.rs @@ -3,7 +3,7 @@ use std::net::Ipv4Addr; use crate::dns_provider::errors::DnsProviderError; use serde::{Deserialize, Serialize}; -use crate::models::{Context, Domain}; +use crate::io_models::{Context, Domain}; pub mod cloudflare; pub mod errors; diff --git a/src/engine.rs b/src/engine.rs index a4c22bfe..3e296c2e 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -10,7 +10,7 @@ use crate::container_registry::ContainerRegistry; use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::DnsProvider; use crate::errors::EngineError; -use crate::models::Context; +use crate::io_models::Context; #[derive(Error, Debug, PartialEq)] pub enum EngineConfigError { diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 9cc9c1eb..22184db2 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -10,7 +10,7 @@ use crate::cmd::helm::HelmError; use crate::container_registry::errors::ContainerRegistryError; use 
crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; use crate::object_storage::errors::ObjectStorageError; use std::fmt::{Display, Formatter}; use thiserror::Error; diff --git a/src/events/mod.rs b/src/events/mod.rs index eb5565f8..deb2eb19 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -8,7 +8,7 @@ extern crate url; use crate::cloud_provider::Kind; use crate::errors::{CommandError, EngineError}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; use std::fmt::{Display, Formatter}; #[derive(Debug, Clone)] diff --git a/src/models.rs b/src/io_models.rs similarity index 92% rename from src/models.rs rename to src/io_models.rs index e2b0bb5b..ab929c25 100644 --- a/src/models.rs +++ b/src/io_models.rs @@ -14,20 +14,17 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; -use crate::cloud_provider::aws::application::ApplicationAws; use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; use crate::cloud_provider::aws::databases::mysql::MySQLAws; use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; use crate::cloud_provider::aws::databases::redis::RedisAws; use crate::cloud_provider::aws::router::RouterAws; -use crate::cloud_provider::digitalocean::application::ApplicationDo; use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; use crate::cloud_provider::digitalocean::databases::redis::RedisDo; use crate::cloud_provider::digitalocean::router::RouterDo; use crate::cloud_provider::environment::Environment; -use crate::cloud_provider::scaleway::application::ApplicationScw; use 
crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; @@ -40,6 +37,12 @@ use crate::cloud_provider::Kind as CPKind; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; +use crate::models; +use crate::models::application::IApplication; +use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; +use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; +use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; +use crate::models::types::{AWS, DO, SCW}; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -219,65 +222,77 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(ApplicationAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), - CPKind::Do => Some(Box::new(ApplicationDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_do_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), - CPKind::Scw => 
Some(Box::new(ApplicationScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), + CPKind::Aws => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + AwsAppExtraSettings {}, + listeners, + logger.clone(), + ) + .unwrap(), + )), + CPKind::Do => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + DoAppExtraSettings {}, + listeners, + logger.clone(), + ) + .unwrap(), + )), + CPKind::Scw => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + ScwAppExtraSettings {}, + listeners, + logger.clone(), + ) 
+ .unwrap(), + )), } } @@ -439,17 +454,15 @@ pub enum StorageType { } impl Storage { - pub fn to_aws_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_aws_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), storage_type: match self.storage_type { - StorageType::SlowHdd => crate::cloud_provider::aws::application::StorageType::SC1, - StorageType::Hdd => crate::cloud_provider::aws::application::StorageType::ST1, - StorageType::Ssd => crate::cloud_provider::aws::application::StorageType::GP2, - StorageType::FastSsd => crate::cloud_provider::aws::application::StorageType::IO1, + StorageType::SlowHdd => AwsStorageType::SC1, + StorageType::Hdd => AwsStorageType::ST1, + StorageType::Ssd => AwsStorageType::GP2, + StorageType::FastSsd => AwsStorageType::IO1, }, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), @@ -457,26 +470,22 @@ impl Storage { } } - pub fn to_do_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_do_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::digitalocean::application::StorageType::Standard, + storage_type: DoStorageType::Standard, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: self.snapshot_retention_in_days, } } - pub fn to_scw_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_scw_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::scaleway::application::StorageType::BlockSsd, + storage_type: ScwStorageType::BlockSsd, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: 
self.snapshot_retention_in_days, @@ -1314,7 +1323,7 @@ impl ToTerraformString for Ipv4Addr { #[cfg(test)] mod tests { - use crate::models::{Domain, QoveryIdentifier}; + use crate::io_models::{Domain, QoveryIdentifier}; #[test] fn test_domain_new() { diff --git a/src/lib.rs b/src/lib.rs index 2d0cb7b2..00177df1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,6 +19,7 @@ pub mod errors; pub mod events; pub mod fs; pub mod git; +pub mod io_models; pub mod logger; pub mod models; pub mod object_storage; diff --git a/src/logger.rs b/src/logger.rs index 02f62444..4497f4b9 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -72,12 +72,12 @@ impl Logger for StdIoLogger { #[cfg(test)] mod tests { use super::*; - use crate::cloud_provider::scaleway::application::ScwRegion; use crate::cloud_provider::Kind; use crate::errors; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; - use crate::models::QoveryIdentifier; + use crate::io_models::QoveryIdentifier; + use crate::models::scaleway::ScwRegion; use tracing_test::traced_test; use url::Url; use uuid::Uuid; diff --git a/src/cloud_provider/aws/application.rs b/src/models/application.rs similarity index 54% rename from src/cloud_provider/aws/application.rs rename to src/models/application.rs index 49f3230f..b7a01601 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/models/application.rs @@ -1,45 +1,52 @@ -use tera::Context as TeraContext; - use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; +use crate::cloud_provider::models::{EnvironmentVariable, Storage}; +use crate::cloud_provider::service::{delete_stateless_service, scale_down_application}; use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, 
deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, + deploy_stateless_service_error, deploy_user_stateless_service, send_progress_on_long_task, Action, Create, Delete, + Helm, Pause, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::EngineError; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; +use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; +use crate::models::types::CloudProvider; +use function_name::named; +use std::marker::PhantomData; +use tera::Context as TeraContext; -pub struct ApplicationAws { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, +#[derive(thiserror::Error, Debug)] +pub enum ApplicationError { + #[error("Application invalid configuration: {0}")] + InvalidConfig(String), } -impl ApplicationAws { +pub struct Application { + _marker: PhantomData, + pub(crate) context: Context, + pub(crate) id: String, + pub(crate) action: Action, + pub(crate) name: String, + pub(crate) ports: Vec, + pub(crate) total_cpus: String, + pub(crate) cpu_burst: String, + pub(crate) total_ram_in_mib: u32, + pub(crate) min_instances: u32, + 
pub(crate) max_instances: u32, + pub(crate) start_timeout_in_seconds: u32, + pub(crate) build: Build, + pub(crate) storage: Vec>, + pub(crate) environment_variables: Vec, + pub(crate) listeners: Listeners, + pub(crate) logger: Box, + pub(crate) _extra_settings: T::AppExtraSettings, +} + +// Here we define the common behavior among all providers +impl Application { pub fn new( context: Context, id: &str, @@ -53,12 +60,16 @@ impl ApplicationAws { max_instances: u32, start_timeout_in_seconds: u32, build: Build, - storage: Vec>, + storage: Vec>, environment_variables: Vec, + extra_settings: T::AppExtraSettings, listeners: Listeners, logger: Box, - ) -> Self { - ApplicationAws { + ) -> Result { + // TODO: Check that the information provided are coherent + + Ok(Self { + _marker: PhantomData, context, id: id.to_string(), action, @@ -75,33 +86,220 @@ impl ApplicationAws { environment_variables, listeners, logger, - } + _extra_settings: extra_settings, + }) } - fn is_stateful(&self) -> bool { + pub fn is_stateful(&self) -> bool { !self.storage.is_empty() } - fn cloud_provider_name(&self) -> &str { - "aws" + pub fn context(&self) -> &Context { + &self.context } - fn struct_name(&self) -> &str { - "application" + pub fn service_type(&self) -> ServiceType { + ServiceType::Application + } + + pub fn id(&self) -> &str { + self.id.as_str() + } + + pub fn name(&self) -> &str { + self.name.as_str() + } + + pub fn commit_id(&self) -> String { + self.build.image.commit_id.clone() + } + + pub fn action(&self) -> &Action { + &self.action + } + + pub fn public_port(&self) -> Option { + self.ports + .iter() + .find(|port| port.publicly_accessible) + .map(|port| port.port as u16) + } + + pub fn start_timeout(&self) -> u32 { + (self.start_timeout_in_seconds + 10) * 4 + } + + pub fn total_cpus(&self) -> String { + self.total_cpus.to_string() + } + + pub fn cpu_burst(&self) -> String { + self.cpu_burst.to_string() + } + + pub fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib + } + 
+ pub fn min_instances(&self) -> u32 { + self.min_instances + } + + pub fn max_instances(&self) -> u32 { + self.max_instances + } + + pub fn publicly_accessible(&self) -> bool { + self.public_port().is_some() + } + + pub fn logger(&self) -> &dyn Logger { + &*self.logger + } + + pub fn selector(&self) -> Option { + Some(format!("appId={}", self.id())) + } + + pub fn build(&self) -> &Build { + &self.build + } + + pub fn build_mut(&mut self) -> &mut Build { + &mut self.build + } + + pub fn sanitize_name(&self) -> String { + sanitize_name("app", self.id()) + } + + pub(crate) fn get_event_details(&self, stage: Stage) -> EventDetails { + let context = self.context(); + EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + self.to_transmitter(), + ) } } -impl Helm for ApplicationAws { +// Traits implementations +impl ToTransmitter for Application { + fn to_transmitter(&self) -> Transmitter { + Transmitter::Application(self.id.to_string(), self.name.to_string()) + } +} + +impl Listen for Application { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} + +pub(crate) trait ToTeraContext { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result; +} + +impl Service for Application +where + Application: ToTeraContext, +{ + fn context(&self) -> &Context { + self.context() + } + + fn service_type(&self) -> ServiceType { + self.service_type() + } + + fn id(&self) -> &str { + self.id() + } + + fn name(&self) -> &str { + self.name() + } + + fn sanitized_name(&self) -> String { + self.sanitize_name() + } + + fn version(&self) -> String { + self.commit_id() + } + + fn action(&self) -> &Action { + self.action() + } + + fn private_port(&self) -> Option { + self.public_port() + } + + fn 
start_timeout(&self) -> Timeout { + Timeout::Value(self.start_timeout()) + } + + fn total_cpus(&self) -> String { + self.total_cpus() + } + + fn cpu_burst(&self) -> String { + self.cpu_burst() + } + + fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib() + } + + fn min_instances(&self) -> u32 { + self.min_instances() + } + + fn max_instances(&self) -> u32 { + self.max_instances() + } + + fn publicly_accessible(&self) -> bool { + self.publicly_accessible() + } + + fn tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context(target) + } + + fn logger(&self) -> &dyn Logger { + self.logger() + } + + fn selector(&self) -> Option { + self.selector() + } +} + +impl Helm for Application { fn helm_selector(&self) -> Option { self.selector() } fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) + crate::string::cut(format!("application-{}-{}", self.id(), self.id()), 50) } fn helm_chart_dir(&self) -> String { - format!("{}/aws/charts/q-application", self.context.lib_root_dir()) + format!( + "{}/{}/charts/q-application", + self.context.lib_root_dir(), + T::helm_directory_name(), + ) } fn helm_chart_values_dir(&self) -> String { @@ -113,185 +311,16 @@ impl Helm for ApplicationAws { } } -impl StatelessService for ApplicationAws { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl ToTransmitter for ApplicationAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id.to_string(), self.name.to_string()) - } -} - -impl Application for ApplicationAws { - fn get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl Service for ApplicationAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - 
self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let mut context = default_tera_context(self, target.kubernetes, target.environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - context.insert("registry_secret", self.build.image.registry_host()); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - 
Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::SC1 => "sc1", - StorageType::ST1 => "st1", - StorageType::GP2 => "gp2", - StorageType::IO1 => "io1", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationAws { +impl Create for Application +where + Application: Service, +{ #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -310,8 +339,8 @@ impl Create for ApplicationAws { fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", 
function_name!(), self.name(), event_details, @@ -324,13 +353,16 @@ impl Create for ApplicationAws { } } -impl Pause for ApplicationAws { +impl Pause for Application +where + Application: Service, +{ #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -350,8 +382,8 @@ impl Pause for ApplicationAws { fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -362,13 +394,16 @@ impl Pause for ApplicationAws { } } -impl Delete for ApplicationAws { +impl Delete for Application +where + Application: Service, +{ #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details.clone(), @@ -388,8 +423,8 @@ impl Delete for ApplicationAws { fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details.clone(), @@ -402,20 +437,29 @@ impl Delete for ApplicationAws { } } -impl Listen for ApplicationAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); +impl 
StatelessService for Application +where + Application: Service, +{ + fn as_stateless_service(&self) -> &dyn StatelessService { + self } } -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - SC1, - ST1, - GP2, - IO1, +pub trait IApplication: StatelessService { + fn get_build(&self) -> &Build; + fn get_build_mut(&mut self) -> &mut Build; +} + +impl IApplication for Application +where + Application: Service, +{ + fn get_build(&self) -> &Build { + self.build() + } + + fn get_build_mut(&mut self) -> &mut Build { + self.build_mut() + } } diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs new file mode 100644 index 00000000..1d21f284 --- /dev/null +++ b/src/models/aws/application.rs @@ -0,0 +1,90 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::aws::AwsStorageType; +use crate::models::types::AWS; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let mut context = default_tera_context(self, target.kubernetes, target.environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build().image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + 
context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.build().image.registry_host()); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + AwsStorageType::SC1 => "sc1", + AwsStorageType::ST1 => "st1", + AwsStorageType::GP2 => "gp2", + AwsStorageType::IO1 => "io1", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/aws/mod.rs b/src/models/aws/mod.rs new file mode 100644 index 00000000..2bbfbc4f --- /dev/null +++ b/src/models/aws/mod.rs @@ -0,0 +1,43 @@ +pub mod application; + +use crate::models::types::CloudProvider; +use crate::models::types::AWS; + +pub struct AwsAppExtraSettings {} +pub struct AwsDbExtraSettings {} +pub struct AwsRouterExtraSettings {} + +impl CloudProvider for AWS { 
+ type AppExtraSettings = AwsAppExtraSettings; + type DbExtraSettings = AwsDbExtraSettings; + type RouterExtraSettings = AwsRouterExtraSettings; + type StorageTypes = AwsStorageType; + + fn short_name() -> &'static str { + "AWS" + } + + fn full_name() -> &'static str { + "Amazon Web Service" + } + + fn registry_short_name() -> &'static str { + "ECR" + } + + fn registry_full_name() -> &'static str { + "Elastic Container Registry" + } + + fn helm_directory_name() -> &'static str { + "aws" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum AwsStorageType { + SC1, + ST1, + GP2, + IO1, +} diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs new file mode 100644 index 00000000..5a2e7a61 --- /dev/null +++ b/src/models/digital_ocean/application.rs @@ -0,0 +1,91 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::digital_ocean::DoStorageType; +use crate::models::types::DO; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let cpu_limits = match 
validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + + // This is specific to digital ocean as it is them that create the registry secret + // we don't have the hand on it + context.insert("registry_secret", "do-container-registry-secret-for-cluster"); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + DoStorageType::Standard => "do-block-storage", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/digital_ocean/mod.rs b/src/models/digital_ocean/mod.rs new file mode 100644 index 00000000..30c11461 --- /dev/null +++ b/src/models/digital_ocean/mod.rs @@ -0,0 
+1,126 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::DO; +use std::fmt; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; + +pub struct DoAppExtraSettings {} +pub struct DoDbExtraSettings {} +pub struct DoRouterExtraSettings {} + +impl CloudProvider for DO { + type AppExtraSettings = DoAppExtraSettings; + type DbExtraSettings = DoDbExtraSettings; + type RouterExtraSettings = DoRouterExtraSettings; + type StorageTypes = DoStorageType; + + fn short_name() -> &'static str { + "DO" + } + + fn full_name() -> &'static str { + "Digital Ocean" + } + + fn registry_short_name() -> &'static str { + "DO CR" + } + + fn registry_full_name() -> &'static str { + "Digital Ocean Container Registry" + } + + fn helm_directory_name() -> &'static str { + "digitalocean" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum DoStorageType { + Standard, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum DoRegion { + NewYorkCity1, + NewYorkCity2, + NewYorkCity3, + Amsterdam2, + Amsterdam3, + SanFrancisco1, + SanFrancisco2, + SanFrancisco3, + Singapore, + London, + Frankfurt, + Toronto, + Bangalore, +} + +impl DoRegion { + pub fn as_str(&self) -> &str { + match self { + DoRegion::NewYorkCity1 => "nyc1", + DoRegion::NewYorkCity2 => "nyc2", + DoRegion::NewYorkCity3 => "nyc3", + DoRegion::Amsterdam2 => "ams2", + DoRegion::Amsterdam3 => "ams3", + DoRegion::SanFrancisco1 => "sfo1", + DoRegion::SanFrancisco2 => "sfo2", + DoRegion::SanFrancisco3 => "sfo3", + DoRegion::Singapore => "sgp1", + DoRegion::London => "lon1", + DoRegion::Frankfurt => "fra1", + DoRegion::Toronto => "tor1", + DoRegion::Bangalore => "blr1", + } + } +} + +impl Display for DoRegion { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + DoRegion::NewYorkCity1 => write!(f, "nyc1"), + DoRegion::NewYorkCity2 => write!(f, "nyc2"), + DoRegion::NewYorkCity3 => write!(f, "nyc3"), + DoRegion::Amsterdam2 => 
write!(f, "ams2"), + DoRegion::Amsterdam3 => write!(f, "ams3"), + DoRegion::SanFrancisco1 => write!(f, "sfo1"), + DoRegion::SanFrancisco2 => write!(f, "sfo2"), + DoRegion::SanFrancisco3 => write!(f, "sfo3"), + DoRegion::Singapore => write!(f, "sgp1"), + DoRegion::London => write!(f, "lon1"), + DoRegion::Frankfurt => write!(f, "fra1"), + DoRegion::Toronto => write!(f, "tor1"), + DoRegion::Bangalore => write!(f, "blr1"), + } + } +} + +impl FromStr for DoRegion { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "nyc1" => Ok(DoRegion::NewYorkCity1), + "nyc2" => Ok(DoRegion::NewYorkCity2), + "nyc3" => Ok(DoRegion::NewYorkCity3), + "ams2" => Ok(DoRegion::Amsterdam2), + "ams3" => Ok(DoRegion::Amsterdam3), + "sfo1" => Ok(DoRegion::SanFrancisco1), + "sfo2" => Ok(DoRegion::SanFrancisco2), + "sfo3" => Ok(DoRegion::SanFrancisco3), + "sgp1" => Ok(DoRegion::Singapore), + "lon1" => Ok(DoRegion::London), + "fra1" => Ok(DoRegion::Frankfurt), + "tor1" => Ok(DoRegion::Toronto), + "blr1" => Ok(DoRegion::Bangalore), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); + } + } + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 00000000..8db33ab5 --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,5 @@ +pub mod application; +pub mod aws; +pub mod digital_ocean; +pub mod scaleway; +pub mod types; diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs new file mode 100644 index 00000000..2b14300f --- /dev/null +++ b/src/models/scaleway/application.rs @@ -0,0 +1,103 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use 
crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::scaleway::ScwStorageType; +use crate::models::types::SCW; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + // TODO(benjaminch): Switch to proper storage class + // Note: Seems volume storage type are not supported, only blocked storage for the time being + // 
https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass + ScwStorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", + ScwStorageType::LocalSsd => "l_ssd", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + // container registry credentials + context.insert( + "container_registry_docker_json_config", + self.build + .image + .clone() + .registry_docker_json_config + .unwrap_or_default() + .as_str(), + ); + + Ok(context) + } +} diff --git a/src/models/scaleway/mod.rs b/src/models/scaleway/mod.rs new file mode 100644 index 00000000..ab73a293 --- /dev/null +++ b/src/models/scaleway/mod.rs @@ -0,0 +1,207 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::SCW; +use std::fmt; +use std::str::FromStr; + +pub struct ScwAppExtraSettings {} +pub struct ScwDbExtraSettings {} +pub struct ScwRouterExtraSettings {} + +impl CloudProvider for SCW { + type AppExtraSettings = ScwAppExtraSettings; + type DbExtraSettings = ScwDbExtraSettings; + type RouterExtraSettings = ScwRouterExtraSettings; + type StorageTypes = ScwStorageType; + + fn short_name() -> &'static str { + "SCW" + } + + fn full_name() -> &'static str { + "Scaleway" + } + + fn registry_short_name() -> &'static str { + "SCW CR" + } + + fn registry_full_name() -> &'static str { + "Scaleway Container Registry" + } + + fn helm_directory_name() -> &'static str { + "scaleway" + } +} + 
+#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] +pub enum ScwStorageType { + #[serde(rename = "b_ssd")] + BlockSsd, + #[serde(rename = "l_ssd")] + LocalSsd, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwRegion { + Paris, + Amsterdam, + Warsaw, +} + +impl ScwRegion { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwRegion::Paris => "fr-par", + ScwRegion::Amsterdam => "nl-ams", + ScwRegion::Warsaw => "pl-waw", + } + } +} + +impl fmt::Display for ScwRegion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwRegion::Paris => write!(f, "fr-par"), + ScwRegion::Amsterdam => write!(f, "nl-ams"), + ScwRegion::Warsaw => write!(f, "pl-waw"), + } + } +} + +impl FromStr for ScwRegion { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "fr-par" => Ok(ScwRegion::Paris), + "nl-ams" => Ok(ScwRegion::Amsterdam), + "pl-waw" => Ok(ScwRegion::Warsaw), + _ => Err(()), + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwZone { + Paris1, + Paris2, + Paris3, + Amsterdam1, + Warsaw1, +} + +impl ScwZone { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwZone::Paris1 => "fr-par-1", + ScwZone::Paris2 => "fr-par-2", + ScwZone::Paris3 => "fr-par-3", + ScwZone::Amsterdam1 => "nl-ams-1", + ScwZone::Warsaw1 => "pl-waw-1", + } + } + + pub fn region(&self) -> ScwRegion { + match self { + ScwZone::Paris1 => ScwRegion::Paris, + ScwZone::Paris2 => ScwRegion::Paris, + ScwZone::Paris3 => ScwRegion::Paris, + ScwZone::Amsterdam1 => ScwRegion::Amsterdam, + ScwZone::Warsaw1 => ScwRegion::Warsaw, + } + } + + // TODO(benjaminch): improve / refactor this! 
+ pub fn region_str(&self) -> String { + match self { + ScwZone::Paris1 => "fr-par", + ScwZone::Paris2 => "fr-par", + ScwZone::Paris3 => "fr-par", + ScwZone::Amsterdam1 => "nl-ams", + ScwZone::Warsaw1 => "pl-waw", + } + .to_string() + } +} + +impl fmt::Display for ScwZone { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwZone::Paris1 => write!(f, "fr-par-1"), + ScwZone::Paris2 => write!(f, "fr-par-2"), + ScwZone::Paris3 => write!(f, "fr-par-3"), + ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), + ScwZone::Warsaw1 => write!(f, "pl-waw-1"), + } + } +} + +impl FromStr for ScwZone { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "fr-par-1" => Ok(ScwZone::Paris1), + "fr-par-2" => Ok(ScwZone::Paris2), + "fr-par-3" => Ok(ScwZone::Paris3), + "nl-ams-1" => Ok(ScwZone::Amsterdam1), + "pl-waw-1" => Ok(ScwZone::Warsaw1), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::{ScwRegion, ScwZone}; + use std::str::FromStr; + + #[test] + fn test_region_to_str() { + assert_eq!("fr-par", ScwRegion::Paris.as_str()); + assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); + assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); + } + + #[test] + fn test_region_from_str() { + assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); + assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); + assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); + } + + #[test] + fn test_zone_to_str() { + assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); + assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); + assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); + assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); + assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); + } + + #[test] + fn test_zone_from_str() { + assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); + assert_eq!(ScwZone::from_str("fr-par-2"), 
Ok(ScwZone::Paris2)); + assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); + assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); + assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); + } + + #[test] + fn test_zone_region() { + assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); + assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); + } +} diff --git a/src/models/types.rs b/src/models/types.rs new file mode 100644 index 00000000..75fef0ea --- /dev/null +++ b/src/models/types.rs @@ -0,0 +1,19 @@ +// Those types are just marker types that are use to tag our struct/object model +pub struct AWS {} +pub struct DO {} +pub struct SCW {} + +// CloudProvider trait allows to derive all the custom type we need per provider, +// with our marker type defined above to be able to select the correct one +pub trait CloudProvider { + type AppExtraSettings; + type DbExtraSettings; + type RouterExtraSettings; + type StorageTypes; + + fn short_name() -> &'static str; + fn full_name() -> &'static str; + fn registry_short_name() -> &'static str; + fn registry_full_name() -> &'static str; + fn helm_directory_name() -> &'static str; +} diff --git a/src/object_storage/mod.rs b/src/object_storage/mod.rs index baa4de6a..27684d8a 100644 --- a/src/object_storage/mod.rs +++ b/src/object_storage/mod.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; use crate::object_storage::errors::ObjectStorageError; use std::fs::File; diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 886d962f..0de654aa 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -14,7 +14,7 @@ use rusoto_s3::{ }; use tokio::io; -use crate::models::{Context, StringPath}; +use 
crate::io_models::{Context, StringPath}; use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime::block_on; diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index 664af26f..1b0e376d 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -2,10 +2,10 @@ use chrono::{DateTime, Utc}; use std::fs::File; use std::path::Path; -use crate::cloud_provider::scaleway::application::ScwZone; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; use crate::object_storage::{Kind, ObjectStorage}; +use crate::models::scaleway::ScwZone; use crate::object_storage::errors::ObjectStorageError; use crate::runtime::block_on; use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index 1785b29e..b337f060 100644 --- a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -11,8 +11,8 @@ use rusoto_s3::{ }; use tokio::io; -use crate::cloud_provider::digitalocean::application::DoRegion; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; +use crate::models::digital_ocean::DoRegion; use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime; diff --git a/src/transaction.rs b/src/transaction.rs index 10eb6ee8..785c3b6e 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -4,14 +4,17 @@ use std::cell::RefCell; use std::rc::Rc; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::{Action, Application, Service}; +use crate::cloud_provider::service::{Action, Service}; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::to_engine_error; use crate::engine::{EngineConfig, EngineConfigError}; use 
crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, Transmitter}; +use crate::io_models::{ + EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, +}; use crate::logger::Logger; -use crate::models::{EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier}; +use crate::models::application::IApplication; pub struct Transaction<'a> { engine: &'a EngineConfig, @@ -128,7 +131,7 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - applications: &mut [Box], + applications: &mut [Box], option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 44d41a7d..36fb944c 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -12,8 +12,8 @@ use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; +use qovery_engine::io_models::{Context, NoOpProgressListener}; use qovery_engine::logger::Logger; -use qovery_engine::models::{Context, NoOpProgressListener}; use std::str::FromStr; use std::sync::Arc; use tracing::error; diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs index 037b2456..6cee1e77 100644 --- a/test_utilities/src/cloudflare.rs +++ b/test_utilities/src/cloudflare.rs @@ -2,7 +2,7 @@ use crate::common::ClusterDomain; use crate::utilities::FuncTestsSecrets; use qovery_engine::dns_provider::cloudflare::Cloudflare; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{Context, Domain}; +use qovery_engine::io_models::{Context, Domain}; pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box { let secrets = FuncTestsSecrets::new(); diff --git 
a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 047f8e03..b07d30c1 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -6,7 +6,7 @@ use std::cell::RefCell; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{ +use qovery_engine::io_models::{ Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; @@ -22,13 +22,11 @@ use base64; use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; use qovery_engine::cloud_provider::environment::Environment; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, Kind}; @@ -36,8 +34,10 @@ use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::CONTAINER; use qovery_engine::logger::Logger; -use qovery_engine::models::DatabaseMode::CONTAINER; +use qovery_engine::models::digital_ocean::DoRegion; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::transaction::{DeploymentOption, Transaction, TransactionResult}; use std::collections::BTreeMap; use std::path::Path; @@ -1116,7 +1116,7 @@ 
pub fn test_db( app.environment_vars = db_infos.app_env_vars.clone(); app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 4192c634..36a5db93 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -7,18 +7,18 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest, NoOpProgressListener}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; +use qovery_engine::models::digital_ocean::DoRegion; pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const DO_KUBERNETES_MINOR_VERSION: u8 = 20; diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index fa915f9c..d3c570bf 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -1,12 +1,11 @@ use const_format::formatcp; use qovery_engine::build_platform::Build; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, 
TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest, NoOpProgressListener}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; @@ -22,6 +21,7 @@ use qovery_engine::container_registry::errors::ContainerRegistryError; use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::logger::Logger; +use qovery_engine::models::scaleway::ScwZone; use tracing::error; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index eb3798b5..3b553377 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -11,7 +11,6 @@ use gethostname; use std::collections::BTreeMap; use std::io::{Error, ErrorKind, Write}; use std::path::Path; -use std::str::FromStr; use passwords::PasswordGenerator; use qovery_engine::cloud_provider::digitalocean::kubernetes::doks_api::get_do_kubeconfig_by_cluster_name; @@ -21,6 +20,7 @@ use retry::delay::Fibonacci; use retry::OperationResult; use std::env; use std::fs; +use std::str::FromStr; use tracing::{info, warn}; use crate::scaleway::{ @@ -29,14 +29,13 @@ use crate::scaleway::{ }; use hashicorp_vault; use qovery_engine::build_platform::local_docker::LocalDocker; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd; use qovery_engine::constants::{ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID, DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY, }; -use qovery_engine::models::{Context, Database, DatabaseKind, 
DatabaseMode, EnvironmentRequest, Features, Metadata}; +use qovery_engine::io_models::{Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, Features, Metadata}; use retry::Error::Operation; use serde::{Deserialize, Serialize}; @@ -50,8 +49,9 @@ use qovery_engine::cmd::docker::Docker; use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc}; use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC}; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::MANAGED; use qovery_engine::logger::{Logger, StdIoLogger}; -use qovery_engine::models::DatabaseMode::MANAGED; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::runtime::block_on; use time::Instant; use url::Url; diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 5e7a8d51..e201076c 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -2,7 +2,7 @@ extern crate test_utilities; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; @@ -10,7 +10,7 @@ use self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TY use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; use test_utilities::common::{test_db, Infrastructure}; @@ -304,7 +304,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); 
environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index c90ae51b..d45cc26f 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -389,7 +389,7 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -460,7 +460,7 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -583,7 +583,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -665,7 +665,7 @@ fn redeploy_same_app_with_ebs() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); let environment_check2 = environment.clone(); @@ -760,7 +760,7 @@ fn deploy_a_not_working_environment_and_after_working_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -838,7 
+838,7 @@ fn deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index fcd475b7..48826a55 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -2,13 +2,13 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::{database_test_environment, test_db, Infrastructure}; use test_utilities::digitalocean::{ clean_environments, do_default_engine_config, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, @@ -332,7 +332,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index a15cf579..756422a6 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, 
StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -331,7 +331,7 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -459,7 +459,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -542,7 +542,7 @@ fn digitalocean_doks_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -649,7 +649,7 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -728,7 +728,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index b91fbcb7..653e2ea2 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -4,8 +4,8 @@ use self::test_utilities::common::ClusterDomain; use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; -use 
qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterTestType}; #[cfg(feature = "test-do-infra")] diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index e214ede1..3e6b9e8d 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -1,4 +1,4 @@ -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; +use qovery_engine::models::digital_ocean::DoRegion; use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use qovery_engine::object_storage::ObjectStorage; use tempfile::NamedTempFile; diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index 3a53d40c..2851c16f 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -1,6 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index 7aae7645..8ea49c8a 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,9 +1,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; -use qovery_engine::models::NoOpProgressListener; +use 
qovery_engine::io_models::NoOpProgressListener; +use qovery_engine::models::scaleway::ScwZone; use std::sync::Arc; use tracing::debug; use uuid::Uuid; diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 489d7e4b..128e8917 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -2,14 +2,14 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, generate_password, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::test_db; use test_utilities::common::{database_test_environment, Infrastructure}; use test_utilities::scaleway::{ @@ -337,7 +337,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index ca0930a3..2fb6cdf1 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use 
std::collections::BTreeMap; use std::thread; @@ -341,7 +341,7 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -474,7 +474,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -642,7 +642,7 @@ fn scaleway_kapsule_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -751,7 +751,7 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -835,7 +835,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 0e6075be..952cc24d 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -4,8 +4,8 @@ use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETE use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, 
ClusterTestType}; #[cfg(feature = "test-scw-infra")] diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index 2f6be0f9..bef7262d 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,6 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; From d98e6893a85006c374d47f4951fb7e50844b7172 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 17:45:23 +0200 Subject: [PATCH 84/85] Cleanup --- src/models/application.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/models/application.rs b/src/models/application.rs index b7a01601..8eba144b 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -326,9 +326,7 @@ where event_details, self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) + send_progress_on_long_task(self, Action::Create, || deploy_user_stateless_service(target, self)) } fn on_create_check(&self) -> Result<(), EngineError> { @@ -347,9 +345,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) + send_progress_on_long_task(self, Action::Create, || deploy_stateless_service_error(target, self)) } } @@ -369,7 +365,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { + send_progress_on_long_task(self, Action::Pause, || { 
scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) }) } @@ -410,7 +406,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { + send_progress_on_long_task(self, Action::Delete, || { delete_stateless_service(target, self, event_details.clone()) }) } @@ -431,7 +427,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { + send_progress_on_long_task(self, Action::Delete, || { delete_stateless_service(target, self, event_details.clone()) }) } From 46cabb498cc01710b5673303b00c3c79cf660d26 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 18:06:42 +0200 Subject: [PATCH 85/85] Cleanup --- src/io_models.rs | 151 +++++++++++++++++------------------ test_utilities/src/common.rs | 84 ++++++++++--------- 2 files changed, 120 insertions(+), 115 deletions(-) diff --git a/src/io_models.rs b/src/io_models.rs index ab929c25..e9410a96 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -38,7 +38,7 @@ use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; -use crate::models::application::IApplication; +use crate::models::application::{ApplicationError, IApplication}; use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; @@ -112,16 +112,18 @@ impl EnvironmentRequest { cloud_provider: &dyn CloudProvider, container_registry: &ContainerRegistryInfo, logger: Box, - ) -> Environment { - //FIXME: remove those flatten as it hide errors regarding conversion to model data type - let applications = self - .applications - .iter() - .filter_map(|x| { - x.to_application_domain(context, x.to_build(container_registry), cloud_provider, logger.clone()) - }) - .collect::>(); + ) -> Result { + 
let mut applications = Vec::with_capacity(self.applications.len()); + for app in &self.applications { + match app.to_application_domain(context, app.to_build(container_registry), cloud_provider, logger.clone()) { + Ok(app) => applications.push(app), + Err(err) => { + return Err(err); + } + } + } + //FIXME: remove those flatten as it hide errors regarding conversion to model data type let routers = self .routers .iter() @@ -134,7 +136,7 @@ impl EnvironmentRequest { .filter_map(|x| x.to_database_domain(context, cloud_provider, logger.clone())) .collect::>(); - Environment::new( + Ok(Environment::new( self.id.as_str(), self.project_id.as_str(), self.owner_id.as_str(), @@ -143,7 +145,7 @@ impl EnvironmentRequest { applications, routers, databases, - ) + )) } } @@ -222,77 +224,68 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - AwsAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), - CPKind::Do => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| 
s.to_do_storage()).collect::>(), - environment_variables, - DoAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), - CPKind::Scw => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - ScwAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), + CPKind::Aws => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + AwsAppExtraSettings {}, + listeners, + logger.clone(), + )?)), + CPKind::Do => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + DoAppExtraSettings {}, + listeners, + logger.clone(), + )?)), + CPKind::Scw => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + 
self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + ScwAppExtraSettings {}, + listeners, + logger.clone(), + )?)), } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index b07d30c1..af35a0f8 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -108,12 +108,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> (Environment, TransactionResult) { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.build_environment( @@ -135,12 +137,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.deploy_environment_with_options( @@ -161,12 +165,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = 
environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.pause_environment(&env); @@ -180,12 +186,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.delete_environment(&env); @@ -1474,12 +1482,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - let env = env.to_environment_domain( - &context, - engine.cloud_provider(), - engine.container_registry().registry_info(), - logger.clone(), - ); + let env = env + .to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); if let Err(err) = deploy_env_tx.deploy_environment(&env) { panic!("{:?}", err) @@ -1593,12 +1603,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - let env = env.to_environment_domain( - &context, - engine.cloud_provider(), - engine.container_registry().registry_info(), - logger.clone(), - ); + let env = env + 
.to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); if let Err(err) = destroy_env_tx.delete_environment(&env) { panic!("{:?}", err)