diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b699374e..7eb52ed9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -23,6 +23,11 @@ jobs: terraform_version: 0.14.10 - name: build-linter-utests run: | + echo "########## LINTER ##########" + cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) + rustup component add clippy + cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) + export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache export SCCACHE_REDIS=${{ secrets.SCCACHE_REDIS }} @@ -34,9 +39,6 @@ jobs: echo "########## SHARED CACHE STATUS ##########" sccache --version sccache --show-stats - echo "########## LINTER ##########" - cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) - RUSTFLAGS="--deny warnings" cargo check || (echo "Solve your warnings to continue"; exit 1) echo "########## START BUILD ##########" cargo build --all-features sccache --show-stats diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8b137891..b28b04f6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1 +1,3 @@ + + diff --git a/Cargo.lock b/Cargo.lock index 7cae05a1..e5e49ce1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -944,9 +944,9 @@ checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "git2" -version = "0.13.25" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" +checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" dependencies = [ "bitflags", "libc", @@ -1445,9 +1445,9 @@ checksum = "a7f823d141fe0a24df1e23b4af4e3c7ba9e5966ec514ea068c93024aa7deb765" [[package]] name = "libgit2-sys" -version = "0.12.26+1.3.0" +version = "0.13.2+1.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" dependencies = [ "cc", "libc", @@ -2119,6 +2119,7 @@ dependencies = [ "tracing-test", "trust-dns-resolver", "url 2.2.2", + "urlencoding", "uuid 0.8.2", "walkdir", ] @@ -3281,8 +3282,10 @@ dependencies = [ "serde_derive", "serde_json", "time 0.2.27", + "tokio 1.10.0", "tracing", "tracing-subscriber", + "url 2.2.2", "uuid 0.8.2", ] @@ -3939,6 +3942,12 @@ dependencies = [ "url 1.7.2", ] +[[package]] +name = "urlencoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" + [[package]] name = "uuid" version = "0.7.4" diff --git a/Cargo.toml b/Cargo.toml index 2154f995..7f5b924b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] chrono = "0.4.19" cmd_lib = "1.0.13" -git2 = "0.13.25" +git2 = "0.14.2" walkdir = "2.3.2" itertools = "0.10.0" base64 = "0.13.0" @@ -30,6 +30,7 @@ function_name = "0.2.0" thiserror = "1.0.30" strum = "0.23" strum_macros = "0.23" +urlencoding = "2.1.0" # FIXME use https://crates.io/crates/blocking instead of runtime.rs @@ -79,7 +80,13 @@ tracing-test = "0.1.0" [features] default = [] -test-all = ["test-all-self-hosted", "test-all-infra", "test-all-managed-services", "test-all-whole-enchilada"] +test-all = ["test-all-minimal", "test-all-self-hosted", "test-all-infra", "test-all-managed-services", "test-all-whole-enchilada"] + +# Minimal depencies test (i.e: build, deploy nothing managed) +test-aws-minimal = [] +test-do-minimal = [] +test-scw-minimal = [] +test-all-minimal = ["test-aws-minimal", "test-do-minimal", "test-scw-minimal"] # functionnal tests by type test-aws-self-hosted = [] @@ -108,4 +115,6 @@ test-do-all = ["test-do-infra", "test-do-managed-services", "test-do-self-hosted 
test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-hosted", "test-scw-whole-enchilada"] # functionnal test with only a k8s cluster as a dependency -test-with-kube = [] +test-local-kube = [] +test-local-docker = [] +test-all-local = ["test-local-kube", "test-local-docker"] diff --git a/lib/common/bootstrap/charts/pleco/Chart.yaml b/lib/common/bootstrap/charts/pleco/Chart.yaml index 224f4ebd..47a0c5d9 100644 --- a/lib/common/bootstrap/charts/pleco/Chart.yaml +++ b/lib/common/bootstrap/charts/pleco/Chart.yaml @@ -1,9 +1,9 @@ apiVersion: v2 -appVersion: 0.10.4 +appVersion: 0.11.1 description: Automatically removes Cloud managed services and Kubernetes resources based on tags with TTL home: https://github.com/Qovery/pleco icon: https://github.com/Qovery/pleco/raw/main/assets/pleco_logo.png name: pleco type: application -version: 0.10.4 +version: 0.11.1 diff --git a/lib/common/bootstrap/charts/pleco/templates/deployment.yaml b/lib/common/bootstrap/charts/pleco/templates/deployment.yaml index 89f3b959..693d634d 100644 --- a/lib/common/bootstrap/charts/pleco/templates/deployment.yaml +++ b/lib/common/bootstrap/charts/pleco/templates/deployment.yaml @@ -94,6 +94,15 @@ spec: {{ if or (eq .Values.awsFeatures.ecr true)}} - --enable-ecr {{ end }} + {{ if or (eq .Values.awsFeatures.sfn true)}} + - --enable-sfn + {{ end }} + {{ if or (eq .Values.awsFeatures.sqs true)}} + - --enable-sqs + {{ end }} + {{ if or (eq .Values.awsFeatures.lambda true)}} + - --enable-lambda + {{ end }} {{- end }} # Scaleway features diff --git a/lib/common/bootstrap/charts/pleco/values-aws.yaml b/lib/common/bootstrap/charts/pleco/values-aws.yaml index 089e5ee5..23092d86 100644 --- a/lib/common/bootstrap/charts/pleco/values-aws.yaml +++ b/lib/common/bootstrap/charts/pleco/values-aws.yaml @@ -44,6 +44,9 @@ awsFeatures: iam: true sshKeys: true ecr: true + sfn: true + sqs: true + lambda: true resources: limits: diff --git a/lib/common/bootstrap/charts/pleco/values.yaml 
b/lib/common/bootstrap/charts/pleco/values.yaml index 09b4e135..322d352b 100644 --- a/lib/common/bootstrap/charts/pleco/values.yaml +++ b/lib/common/bootstrap/charts/pleco/values.yaml @@ -3,7 +3,7 @@ replicaCount: 1 image: repository: qoveryrd/pleco pullPolicy: IfNotPresent - plecoImageTag: "0.10.4" + plecoImageTag: "0.11.1" cloudProvider: "" diff --git a/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf b/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf index 4d9ad324..414e32e5 100644 --- a/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf +++ b/lib/digitalocean/bootstrap/doks-gen-kubectl-config.j2.tf @@ -7,5 +7,5 @@ KUBECONFIG resource "local_file" "kubeconfig" { filename = "${var.space_bucket_kubeconfig}/${var.kubeconfig_filename}" content = local.kubeconfig - file_permission = "0644" + file_permission = "0600" } \ No newline at end of file diff --git a/lib/digitalocean/bootstrap/tf-providers.j2.tf b/lib/digitalocean/bootstrap/tf-providers.j2.tf index bd5bf507..5d5f3474 100644 --- a/lib/digitalocean/bootstrap/tf-providers.j2.tf +++ b/lib/digitalocean/bootstrap/tf-providers.j2.tf @@ -15,7 +15,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.36.0" + version = "~> 3.66.0" } digitalocean = { source = "digitalocean/digitalocean" diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 9035b942..37e35087 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -70,7 +70,7 @@ charts: dest: services no_sync: true - name: pleco - version: 0.10.4 + version: 0.11.1 repo_name: pleco - name: do-k8s-token-rotate version: 0.1.3 diff --git a/lib/scaleway/bootstrap/tf-providers.j2.tf b/lib/scaleway/bootstrap/tf-providers.j2.tf index 5157bc29..b097c8bf 100644 --- a/lib/scaleway/bootstrap/tf-providers.j2.tf +++ b/lib/scaleway/bootstrap/tf-providers.j2.tf @@ -13,7 +13,7 @@ terraform { } aws = { source = "hashicorp/aws" - version = "~> 3.36.0" + version = "~> 3.66.0" } local = { source = "hashicorp/local" 
diff --git a/rustfmt.toml b/rustfmt.toml index 9328e6c1..abb6a864 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,5 @@ -max_width = 120 edition = "2018" +max_width = 120 +fn_call_width = 80 +attr_fn_like_width = 80 +use_field_init_shorthand = true diff --git a/src/build_platform/docker.rs b/src/build_platform/dockerfile_utils.rs similarity index 50% rename from src/build_platform/docker.rs rename to src/build_platform/dockerfile_utils.rs index 973f02fe..2873b4b3 100644 --- a/src/build_platform/docker.rs +++ b/src/build_platform/dockerfile_utils.rs @@ -24,7 +24,7 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec) -> Result>(); + let x = arg_value.split('=').collect::>(); x.get(0).unwrap_or(&"").to_string() }) .collect::>(); @@ -32,39 +32,11 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec) -> Result, - dockerfile_content: Vec, -) -> Result, Utf8Error> { - // extract env vars used in the Dockerfile - let used_args = extract_dockerfile_args(dockerfile_content)?; - - // match env var args and dockerfile env vargs - let env_var_arg_keys = env_var_args - .iter() - .map(|env_var| env_var.split("=").next().unwrap_or(&"").to_string()) - .collect::>(); - - let matched_env_args_keys = env_var_arg_keys - .intersection(&used_args) - .map(|arg| arg.clone()) - .collect::>(); - - Ok(env_var_args - .into_iter() - .filter(|env_var_arg| { - let env_var_arg_key = env_var_arg.split("=").next().unwrap_or(""); - matched_env_args_keys.contains(env_var_arg_key) - }) - .collect::>()) -} - #[cfg(test)] mod tests { use super::*; + use maplit::btreemap; + use std::collections::BTreeMap; #[test] fn test_extract_dockerfile_args() { @@ -113,29 +85,29 @@ mod tests { let res = extract_dockerfile_args(dockerfile.to_vec()); assert_eq!(res.unwrap().len(), 4); - let env_var_args_to_match = vec![ - "foo=abcdvalue".to_string(), - "bar=abcdvalue".to_string(), - "toto=abcdvalue".to_string(), - "x=abcdvalue".to_string(), + let args = btreemap![ + "foo" => "abcdvalue", + "bar" => 
"abcdvalue", + "toto" => "abcdvalue", + "x" => "abcdvalue", ]; - let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec()); + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret, args); - assert_eq!(matched_vars.clone().unwrap(), env_var_args_to_match.clone()); + let args = btreemap!["toto" => "abcdvalue", "x" => "abcdvalue"]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 2); - assert_eq!(matched_vars.unwrap().len(), 4); - - let matched_vars = match_used_env_var_args( - vec!["toto=abcdvalue".to_string(), "x=abcdvalue".to_string()], - dockerfile.to_vec(), - ); - - assert_eq!(matched_vars.unwrap().len(), 2); - - let matched_vars = match_used_env_var_args(vec![], dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let args: BTreeMap<&str, &str> = btreemap![]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); let dockerfile = b" FROM node @@ -144,9 +116,10 @@ mod tests { RUN ls -lh "; - let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); } #[test] @@ -180,24 +153,26 @@ mod tests { let res = extract_dockerfile_args(dockerfile.to_vec()); assert_eq!(res.unwrap().len(), 3); - let matched_vars = match_used_env_var_args( - vec![ - "PRISMIC_REPO_NAME=abcdvalue".to_string(), - "PRISMIC_API_KEY=abcdvalue".to_string(), - "PRISMIC_CUSTOM_TYPES_API_TOKEN=abcdvalue".to_string(), - ], 
- dockerfile.to_vec(), - ); + let args = btreemap![ + "PRISMIC_REPO_NAME" => "abcdvalue", + "PRISMIC_API_KEY" => "abcdvalue", + "PRISMIC_CUSTOM_TYPES_API_TOKEN" => "abcdvalue", + ]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 3); - assert_eq!(matched_vars.unwrap().len(), 3); + let args = btreemap!["PRISMIC_REPO_NAME" => "abcdvalue"]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 1); - let matched_vars = - match_used_env_var_args(vec!["PRISMIC_REPO_NAME=abcdvalue".to_string()], dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 1); - - let matched_vars = match_used_env_var_args(vec![], dockerfile.to_vec()); - - assert_eq!(matched_vars.unwrap().len(), 0); + let args: BTreeMap<&str, &str> = btreemap![]; + let matched_vars = extract_dockerfile_args(dockerfile.to_vec()).unwrap(); + let mut ret = args.clone(); + ret.retain(|k, _| matched_vars.contains(*k)); + assert_eq!(ret.len(), 0); } } diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 168d565b..9728a235 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -1,22 +1,28 @@ -use std::path::Path; +#![allow(clippy::redundant_closure)] + +use std::io::{Error, ErrorKind}; +use std::path::{Path, PathBuf}; +use std::time::Duration; use std::{env, fs}; -use chrono::Duration; use git2::{Cred, CredentialType}; use sysinfo::{Disk, DiskExt, SystemExt}; -use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, CacheResult, Credentials, Image, Kind}; -use crate::cmd::command::QoveryCommand; -use crate::errors::{CommandError, EngineError, Tag}; +use crate::build_platform::dockerfile_utils::extract_dockerfile_args; +use crate::build_platform::{Build, BuildError, 
BuildPlatform, Credentials, Kind}; +use crate::cmd::command; +use crate::cmd::command::CommandError::Killed; +use crate::cmd::command::{CommandKiller, QoveryCommand}; +use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; -const BUILD_DURATION_TIMEOUT_MIN: i64 = 30; +const BUILD_DURATION_TIMEOUT_SEC: u64 = 30 * 60; /// https://buildpacks.io/ const BUILDPACKS_BUILDERS: [&str; 1] = [ @@ -36,208 +42,191 @@ pub struct LocalDocker { } impl LocalDocker { - pub fn new(context: Context, id: &str, name: &str, logger: Box) -> Self { - LocalDocker { + pub fn new(context: Context, id: &str, name: &str, logger: Box) -> Result { + Ok(LocalDocker { context, id: id.to_string(), name: name.to_string(), listeners: vec![], logger, - } - } - - fn image_does_exist(&self, image: &Image) -> Result { - let mut cmd = QoveryCommand::new( - "docker", - &vec!["image", "inspect", image.name_with_tag().as_str()], - &self.get_docker_host_envs(), - ); - - Ok(matches!(cmd.exec(), Ok(_))) + }) } fn get_docker_host_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], + if let Some(socket_path) = self.context.docker_tcp_socket() { + vec![("DOCKER_HOST", socket_path.as_str())] + } else { + vec![] } } - /// Read Dockerfile content from location path and return an array of bytes - fn get_dockerfile_content(&self, dockerfile_path: &str) -> Result, EngineError> { - match fs::read(dockerfile_path) { - Ok(bytes) => Ok(bytes), - Err(err) => { - let engine_error = EngineError::new_docker_cannot_read_dockerfile( - self.get_event_details(), - 
dockerfile_path.to_string(), - CommandError::new(err.to_string(), None), - ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(engine_error.clone(), None)); - Err(engine_error) - } + fn reclaim_space_if_needed(&self) { + if env::var_os("CI").is_some() { + self.logger.log(EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe("CI environment variable found, no docker prune will be made".to_string()), + )); + + return; + } + + // ensure there is enough disk space left before building a new image + let docker_path_string = "/var/lib/docker"; + let docker_path = Path::new(docker_path_string); + + // get system info + let mut system = sysinfo::System::new_all(); + system.refresh_all(); + + for disk in system.get_disks() { + if disk.get_mount_point() == docker_path { + let event_details = self.get_event_details(); + if let Err(e) = check_docker_space_usage_and_clean( + &self.context.docker, + disk, + event_details.clone(), + &*self.logger(), + ) { + self.logger.log(EngineEvent::Warning( + event_details, + EventMessage::new(e.to_string(), Some(e.to_string())), + )); + } + break; + }; } } fn build_image_with_docker( &self, - build: Build, + build: &mut Build, dockerfile_complete_path: &str, into_dir_docker_style: &str, - env_var_args: Vec, - use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result { - let mut docker_args = if !use_build_cache { - vec!["build", "--no-cache"] - } else { - vec!["build"] + ) -> Result<(), BuildError> { + // logger + let log_info = { + let app_id = build.image.application_id.clone(); + move |msg: String| { + self.logger.log(EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe(msg.clone()), + )); + + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { id: app_id.clone() }, + ProgressLevel::Info, + Some(msg), + self.context.execution_id(), + )); + } }; - let args = self.context.docker_build_options(); - for v in args.iter() { - for 
s in v.iter() { - docker_args.push(String::as_str(s)); + // Going to inject only env var that are used by the dockerfile + // so extracting it and modifying the image tag and env variables + let dockerfile_content = fs::read(dockerfile_complete_path).map_err(|err| { + BuildError::IoError( + build.image.application_id.clone(), + "reading dockerfile content".to_string(), + err, + ) + })?; + let dockerfile_args = match extract_dockerfile_args(dockerfile_content) { + Ok(dockerfile_args) => dockerfile_args, + Err(err) => { + let msg = format!("Cannot extract env vars from your dockerfile {}", err); + return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); } + }; + + // Keep only the env variables we want for our build + // and force re-compute the image tag + build.environment_variables.retain(|k, _| dockerfile_args.contains(k)); + build.compute_image_tag(); + + // Prepare image we want to build + let image_to_build = ContainerImage { + registry: build.image.registry_url.clone(), + name: build.image.name(), + tags: vec![build.image.tag.clone(), "latest".to_string()], + }; + + let image_cache = ContainerImage { + registry: build.image.registry_url.clone(), + name: build.image.name(), + tags: vec!["latest".to_string()], + }; + + // Check if the image does not exist already remotely, if yes, we skip the build + let image_name = image_to_build.image_name(); + log_info(format!("🕵️ Checking if image already exist remotely {}", image_name)); + if let Ok(true) = self.context.docker.does_image_exist_remotely(&image_to_build) { + log_info(format!("🎯 Skipping build. Image already exist in the registry {}", image_name)); + + // skip build + return Ok(()); } - let name_with_tag = build.image.name_with_tag(); - let name_with_latest_tag = build.image.name_with_latest_tag(); + log_info(format!("⛏️ Building image. 
It does not exist remotely {}", image_name)); + // Actually do the build of the image + let env_vars: Vec<(&str, &str)> = build + .environment_variables + .iter() + .map(|(k, v)| (k.as_str(), v.as_str())) + .collect(); - docker_args.extend(vec![ - "-f", - dockerfile_complete_path, - "-t", - name_with_tag.as_str(), - "-t", - name_with_latest_tag.as_str(), - ]); - - let dockerfile_content = self.get_dockerfile_content(dockerfile_complete_path)?; - let env_var_args = match docker::match_used_env_var_args(env_var_args, dockerfile_content) { - Ok(env_var_args) => env_var_args, - Err(err) => { - let engine_error = EngineError::new_docker_cannot_extract_env_vars_from_dockerfile( - self.get_event_details(), - dockerfile_complete_path.to_string(), - CommandError::new(err.to_string(), None), - ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(engine_error.clone(), None)); - return Err(engine_error); - } - }; - - let mut docker_args = if env_var_args.is_empty() { - docker_args - } else { - let mut build_args = vec![]; - - env_var_args.iter().for_each(|arg_value| { - build_args.push("--build-arg"); - build_args.push(arg_value.as_str()); - }); - - docker_args.extend(build_args); - docker_args - }; - - docker_args.push(into_dir_docker_style); - - // docker build - let mut cmd = QoveryCommand::new("docker", &docker_args, &self.get_docker_host_envs()); - - let exit_status = cmd.exec_with_abort( - Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), - |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); - - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Info, - Some(line), - self.context.execution_id(), - )); - }, - |line| { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); - - 
lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Warn, - Some(line), - self.context.execution_id(), - )); - }, - is_task_canceled, + let exit_status = self.context.docker.build( + Path::new(dockerfile_complete_path), + Path::new(into_dir_docker_style), + &image_to_build, + &env_vars, + &image_cache, + true, + &mut |line| log_info(line), + &mut |line| log_info(line), + &CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled), ); match exit_status { - Ok(_) => Ok(BuildResult { build }), - Err(err) => Err(EngineError::new_docker_cannot_build_container_image( - self.get_event_details(), - self.name_with_id(), - CommandError::new(format!("{:?}", err), None), - )), + Ok(_) => Ok(()), + Err(DockerError::Aborted(msg)) => Err(BuildError::Aborted(msg)), + Err(err) => Err(BuildError::DockerError(build.image.application_id.clone(), err)), } } fn build_image_with_buildpacks( &self, - build: Build, + build: &Build, into_dir_docker_style: &str, - env_var_args: Vec, use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result { - let name_with_tag = build.image.name_with_tag(); + ) -> Result<(), BuildError> { + let name_with_tag = build.image.full_image_name_with_tag(); + let name_with_latest_tag = format!("{}:latest", build.image.full_image_name()); - let args = self.context.docker_build_options(); - - let mut exit_status: Result<(), CommandError> = - Err(CommandError::new_from_safe_message("No builder names".to_string())); + let mut exit_status: Result<(), command::CommandError> = Err(command::CommandError::ExecutionError( + Error::new(ErrorKind::InvalidData, "No builder names".to_string()), + )); for builder_name in BUILDPACKS_BUILDERS.iter() { let mut buildpacks_args = if !use_build_cache { - vec!["build", name_with_tag.as_str(), "--clear-cache"] + vec!["build", "--publish", name_with_tag.as_str(), "--clear-cache"] } else 
{ - vec!["build", name_with_tag.as_str()] + vec!["build", "--publish", name_with_tag.as_str()] }; - for v in args.iter() { - for s in v.iter() { - buildpacks_args.push(String::as_str(s)); - } - } - + // always add 'latest' tag + buildpacks_args.extend(vec!["-t", name_with_latest_tag.as_str()]); buildpacks_args.extend(vec!["--path", into_dir_docker_style]); - let mut buildpacks_args = if env_var_args.is_empty() { - buildpacks_args - } else { - let mut build_args = vec![]; - - env_var_args.iter().for_each(|x| { - build_args.push("--env"); - build_args.push(x.as_str()); - }); - - buildpacks_args.extend(build_args); - buildpacks_args - }; + let mut args_buffer = Vec::with_capacity(build.environment_variables.len()); + for (key, value) in &build.environment_variables { + args_buffer.push("--env".to_string()); + args_buffer.push(format!("{}={}", key, value)); + } + buildpacks_args.extend(args_buffer.iter().map(|value| value.as_str()).collect::>()); buildpacks_args.push("-B"); buildpacks_args.push(builder_name); @@ -259,26 +248,10 @@ impl LocalDocker { } _ => { let msg = format!( - "Cannot build: Invalid buildpacks language format: expected `builder[@version]` got {}", + "Invalid buildpacks language format: expected `builder[@version]` got {}", buildpacks_language ); - lh.deployment_error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Error, - Some(msg.clone()), - self.context.execution_id(), - )); - - let err = EngineError::new_buildpack_invalid_language_format( - self.get_event_details(), - buildpacks_language.to_string(), - ); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - return Err(err); + return Err(BuildError::InvalidConfig(build.image.application_id.clone(), msg)); } } } @@ -295,45 +268,40 @@ impl LocalDocker { // buildpacks build let mut cmd = QoveryCommand::new("pack", &buildpacks_args, &self.get_docker_host_envs()); - exit_status = cmd - .exec_with_abort( - 
Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), - |line| { - self.logger.log( - LogLevel::Info, - EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(line.to_string())), - ); + let cmd_killer = CommandKiller::from(Duration::from_secs(BUILD_DURATION_TIMEOUT_SEC), is_task_canceled); + exit_status = cmd.exec_with_abort( + &mut |line| { + self.logger.log(EngineEvent::Info( + self.get_event_details(), + EventMessage::new_from_safe(line.to_string()), + )); - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Info, - Some(line), - self.context.execution_id(), - )); - }, - |line| { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - self.get_event_details(), - EventMessage::new_from_safe(line.to_string()), - ), - ); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { + id: build.image.application_id.clone(), + }, + ProgressLevel::Info, + Some(line), + self.context.execution_id(), + )); + }, + &mut |line| { + self.logger.log(EngineEvent::Warning( + self.get_event_details(), + EventMessage::new_from_safe(line.to_string()), + )); - lh.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Warn, - Some(line), - self.context.execution_id(), - )); - }, - is_task_canceled, - ) - .map_err(|err| CommandError::new(format!("{:?}", err), None)); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { + id: build.image.application_id.clone(), + }, + ProgressLevel::Warn, + Some(line), + self.context.execution_id(), + )); + }, + &cmd_killer, + ); if exit_status.is_ok() { // quit now if the builder successfully build the app @@ -342,33 +310,23 @@ impl LocalDocker { } match exit_status { - Ok(_) => Ok(BuildResult { build }), - Err(err) => { - let error = EngineError::new_buildpack_cannot_build_container_image( - self.get_event_details(), - 
self.name_with_id(), - BUILDPACKS_BUILDERS.iter().map(|b| b.to_string()).collect(), - CommandError::new(format!("{:?}", err), None), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - Err(error) - } + Ok(_) => Ok(()), + Err(Killed(msg)) => Err(BuildError::Aborted(msg)), + Err(err) => Err(BuildError::BuildpackError(build.image.application_id.clone(), err)), } } - fn get_repository_build_root_path(&self, build: &Build) -> Result { + fn get_repository_build_root_path(&self, build: &Build) -> Result { workspace_directory( self.context.workspace_root_dir(), self.context.execution_id(), format!("build/{}", build.image.name.as_str()), ) .map_err(|err| { - EngineError::new_cannot_get_workspace_directory( - self.get_event_details(), - CommandError::new(err.to_string(), None), + BuildError::IoError( + build.image.application_id.clone(), + "when creating build workspace".to_string(), + err, ) }) } @@ -391,107 +349,40 @@ impl BuildPlatform for LocalDocker { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { - if !crate::cmd::command::does_binary_exist("docker") { - return Err(EngineError::new_missing_required_binary( - self.get_event_details(), - "docker".to_string(), - )); - } - - if !crate::cmd::command::does_binary_exist("pack") { - return Err(EngineError::new_missing_required_binary( - self.get_event_details(), - "pack".to_string(), - )); - } - - Ok(()) - } - - fn has_cache(&self, build: &Build) -> Result { - info!("LocalDocker.has_cache() called for {}", self.name()); - - // Check if a local cache layers for the container image exists. 
- let repository_root_path = self.get_repository_build_root_path(&build)?; - - let parent_build = build.to_previous_build(repository_root_path).map_err(|err| { - EngineError::new_builder_get_build_error(self.get_event_details(), build.image.commit_id.to_string(), err) - })?; - - let parent_build = match parent_build { - Some(parent_build) => parent_build, - None => return Ok(CacheResult::MissWithoutParentBuild), - }; - - // check if local layers exist - let mut cmd = QoveryCommand::new("docker", &["images", "-q", parent_build.image.name.as_str()], &[]); - - let mut result = CacheResult::Miss(parent_build); - let _ = cmd.exec_with_timeout( - Duration::minutes(1), // `docker images` command can be slow with tons of images - it's probably not indexed - |_| result = CacheResult::Hit, // if a line is returned, then the image is locally present - |r_err| error!("Error executing docker command {}", r_err), - ); - - Ok(result) - } - - fn build( - &self, - build: Build, - force_build: bool, - is_task_canceled: &dyn Fn() -> bool, - ) -> Result { + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError> { let event_details = self.get_event_details(); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("LocalDocker.build() called".to_string()), - ), - ); - - if is_task_canceled() { - return Err(EngineError::new_task_cancellation_requested(event_details.clone())); - } - let listeners_helper = ListenersHelper::new(&self.listeners); + let app_id = build.image.application_id.clone(); - if !force_build && self.image_does_exist(&build.image)? 
{ - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Image `{}` found on repository, container build is not required", - build.image.name_with_tag() - )), - ), - ); - - return Ok(BuildResult { build }); + // check if we should already abort the task + if is_task_canceled() { + return Err(BuildError::Aborted(build.image.application_id.clone())); } - let repository_root_path = self.get_repository_build_root_path(&build)?; - - self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Cloning repository: {} to {}", - build.git_repository.url, repository_root_path - )), - ), + // LOGGING + let repository_root_path = PathBuf::from(self.get_repository_build_root_path(build)?); + let msg = format!( + "📥 Cloning repository: {} to {}", + build.git_repository.url, + repository_root_path.to_string_lossy() ); + listeners_helper.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { id: app_id.clone() }, + ProgressLevel::Info, + Some(msg.clone()), + self.context.execution_id(), + )); + self.logger + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))); + // LOGGING + // Create callback that will be called by git to provide credentials per user + // If people use submodule, they need to provide us their ssh key let get_credentials = |user: &str| { let mut creds: Vec<(CredentialType, Cred)> = Vec::with_capacity(build.git_repository.ssh_keys.len() + 1); for ssh_key in build.git_repository.ssh_keys.iter() { - let public_key = ssh_key.public_key.as_ref().map(|x| x.as_str()); - let passphrase = ssh_key.passphrase.as_ref().map(|x| x.as_str()); + let public_key = ssh_key.public_key.as_deref(); + let passphrase = ssh_key.passphrase.as_deref(); if let Ok(cred) = Cred::ssh_key_from_memory(user, public_key, &ssh_key.private_key, passphrase) { creds.push((CredentialType::SSH_MEMORY, cred)); } @@ -500,142 +391,84 @@ 
impl BuildPlatform for LocalDocker { if let Some(Credentials { login, password }) = &build.git_repository.credentials { creds.push(( CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(&login, &password).unwrap(), + Cred::userpass_plaintext(login, password).unwrap(), )); } creds }; - if Path::new(repository_root_path.as_str()).exists() { - // remove folder before cloning it again - // FIXME: reuse this folder and checkout the right commit - let _ = fs::remove_dir_all(repository_root_path.as_str()); + // Cleanup, mono repo can require to clone multiple time the same repo + // FIXME: re-use the same repo and just checkout at the correct commit + if repository_root_path.exists() { + let app_id = app_id; + fs::remove_dir_all(&repository_root_path) + .map_err(|err| BuildError::IoError(app_id, "cleaning old repository".to_string(), err))?; } - // git clone - if is_task_canceled() { - return Err(EngineError::new_task_cancellation_requested(event_details.clone())); - } + // Do the real git clone if let Err(clone_error) = git::clone_at_commit( &build.git_repository.url, &build.git_repository.commit_id, &repository_root_path, &get_credentials, ) { - let error = EngineError::new_builder_clone_repository_error( - self.get_event_details(), - build.git_repository.url.to_string(), - CommandError::new(clone_error.to_string(), None), - ); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + return Err(BuildError::GitError(build.image.application_id.clone(), clone_error)); } - let mut disable_build_cache = false; - let mut env_var_args: Vec = Vec::with_capacity(build.options.environment_variables.len()); - - for ev in &build.options.environment_variables { - if ev.key == "QOVERY_DISABLE_BUILD_CACHE" && ev.value.to_lowercase() == "true" { - // this is a special flag to disable build cache dynamically - // -- do not pass this env var key/value to as build parameter - disable_build_cache = true; - } else { - 
env_var_args.push(format!("{}={}", ev.key, ev.value)); - } + if is_task_canceled() { + return Err(BuildError::Aborted(build.image.application_id.clone())); } // ensure docker_path is a mounted volume, otherwise ignore because it's not what Qovery does in production // ex: this cause regular cleanup on CI, leading to random tests errors - match env::var_os("CI") { - Some(_) => self.logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "CI environment variable found, no docker prune will be made".to_string(), - ), - ), - ), - None => { - // ensure there is enough disk space left before building a new image - let docker_path_string = "/var/lib/docker"; - let docker_path = Path::new(docker_path_string); - - // get system info - let mut system = sysinfo::System::new_all(); - system.refresh_all(); - - for disk in system.get_disks() { - if disk.get_mount_point() == docker_path { - let event_details = self.get_event_details(); - if let Err(e) = check_docker_space_usage_and_clean( - disk, - self.get_docker_host_envs(), - event_details.clone(), - &*self.logger(), - ) { - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new(e.message_raw(), e.message_safe()), - ), - ); - } - break; - }; - } - } - } + self.reclaim_space_if_needed(); let app_id = build.image.application_id.clone(); - let build_context_path = format!("{}/{}/.", repository_root_path.as_str(), build.git_repository.root_path); + + // Check that the build context is correct + let build_context_path = repository_root_path.join(&build.git_repository.root_path); + if !build_context_path.is_dir() { + let msg = format!( + "Specified build context path {:?} does not exist within the repository", + &build.git_repository.root_path + ); + return Err(BuildError::InvalidConfig(app_id, msg)); + } + + // Safety check to ensure we can't go up in the directory + if !build_context_path + .canonicalize() + 
.unwrap_or_default() + .starts_with(repository_root_path.canonicalize().unwrap_or_default()) + { + let msg = format!( + "Specified build context path {:?} tries to access directory outside of his git repository", + &build.git_repository.root_path + ); + return Err(BuildError::InvalidConfig(app_id, msg)); + } + + // now we have to decide if we use buildpack or docker to build our application // If no Dockerfile specified, we should use BuildPacks - let result = if build.git_repository.dockerfile_path.is_some() { + let result = if let Some(dockerfile_path) = &build.git_repository.dockerfile_path { // build container from the provided Dockerfile - let dockerfile_relative_path = build.git_repository.dockerfile_path.as_ref().unwrap(); - let dockerfile_normalized_path = match dockerfile_relative_path.trim() { - "" | "." | "/" | "/." | "./" | "Dockerfile" => "Dockerfile", - dockerfile_root_path => dockerfile_root_path, - }; - - let dockerfile_relative_path = format!("{}/{}", build.git_repository.root_path, dockerfile_normalized_path); - let dockerfile_absolute_path = format!("{}/{}", repository_root_path.as_str(), dockerfile_relative_path); + let dockerfile_absolute_path = repository_root_path.join(dockerfile_path); // If the dockerfile does not exist, abort - if !Path::new(dockerfile_absolute_path.as_str()).exists() { - listeners_helper.error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id.clone(), - }, - ProgressLevel::Error, - Some(format!( - "Dockerfile is not present at location {}", - dockerfile_relative_path - )), - self.context.execution_id(), - )); - - let error = - EngineError::new_docker_cannot_find_dockerfile(self.get_event_details(), dockerfile_absolute_path); - - self.logger - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - - return Err(error); + if !dockerfile_absolute_path.is_file() { + let msg = format!( + "Specified dockerfile path {:?} does not exist within the repository", + &dockerfile_path + ); + 
return Err(BuildError::InvalidConfig(app_id, msg)); } self.build_image_with_docker( build, - dockerfile_absolute_path.as_str(), - build_context_path.as_str(), - env_var_args, - !disable_build_cache, + dockerfile_absolute_path.to_str().unwrap_or_default(), + build_context_path.to_str().unwrap_or_default(), &listeners_helper, is_task_canceled, ) @@ -643,75 +476,16 @@ impl BuildPlatform for LocalDocker { // build container with Buildpacks self.build_image_with_buildpacks( build, - build_context_path.as_str(), - env_var_args, - !disable_build_cache, + build_context_path.to_str().unwrap_or_default(), + !build.disable_cache, &listeners_helper, is_task_canceled, ) }; - let msg = match &result { - Ok(_) => format!("✅ Container {} is built", self.name_with_id()), - Err(engine_err) if engine_err.tag() == &Tag::TaskCancellationRequested => { - format!("🚫 Container {} build has been canceled", self.name_with_id()) - } - Err(engine_err) => { - format!( - "❌ Container {} failed to be build: {}", - self.name_with_id(), - engine_err.message() - ) - } - }; - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { id: app_id }, - ProgressLevel::Info, - Some(msg.to_string()), - self.context.execution_id(), - )); - - self.logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg.to_string())), - ); - result } - fn build_error(&self, build: Build) -> Result { - let event_details = self.get_event_details(); - self.logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!("LocalDocker.build_error() called for {}", self.name())), - ), - ); - - let listener_helper = ListenersHelper::new(&self.listeners); - - // FIXME - let message = String::from("something goes wrong (not implemented)"); - - listener_helper.error(ProgressInfo::new( - ProgressScope::Application { - id: build.image.application_id, - }, - ProgressLevel::Error, - 
Some(message.as_str()), - self.context.execution_id(), - )); - - let err = EngineError::new_not_implemented_error(event_details); - - self.logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); - - // FIXME - Err(err) - } - fn logger(&self) -> Box { self.logger.clone() } @@ -734,68 +508,36 @@ impl ToTransmitter for LocalDocker { } fn check_docker_space_usage_and_clean( + docker: &Docker, docker_path_size_info: &Disk, - envs: Vec<(&str, &str)>, event_details: EventDetails, logger: &dyn Logger, -) -> Result<(), CommandError> { +) -> Result<(), DockerError> { let docker_max_disk_percentage_usage_before_purge = 60; // arbitrary percentage that should make the job anytime let available_space = docker_path_size_info.get_available_space(); let docker_percentage_remaining = available_space * 100 / docker_path_size_info.get_total_space(); if docker_percentage_remaining < docker_max_disk_percentage_usage_before_purge || available_space == 0 { - logger.log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", - docker_percentage_remaining, docker_max_disk_percentage_usage_before_purge - )), - ), - ); + logger.log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe(format!( + "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", + docker_percentage_remaining, docker_max_disk_percentage_usage_before_purge + )), + )); - return docker_prune_images(envs); + return docker.prune_images(); }; - logger.log( - LogLevel::Info, - EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "No need to purge old docker images, only {}% ({}/{}) disk used", - 100 - docker_percentage_remaining, - docker_path_size_info.get_available_space(), - docker_path_size_info.get_total_space(), - )), - ), - ); - - Ok(()) -} - -fn docker_prune_images(envs: Vec<(&str, &str)>) -> Result<(), 
CommandError> { - let all_prunes_commands = vec![ - vec!["container", "prune", "-f"], - vec!["image", "prune", "-a", "-f"], - vec!["builder", "prune", "-a", "-f"], - vec!["volume", "prune", "-f"], - ]; - - let mut errored_commands = vec![]; - for prune in all_prunes_commands { - let mut cmd = QoveryCommand::new("docker", &prune, &envs); - if let Err(e) = cmd.exec_with_timeout(Duration::minutes(BUILD_DURATION_TIMEOUT_MIN), |_| {}, |_| {}) { - errored_commands.push(format!("{} {:?}", prune[0], e)); - } - } - - if errored_commands.len() > 0 { - return Err(CommandError::new( - errored_commands.join("/ "), - Some("Error while trying to prune images.".to_string()), - )); - } + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe(format!( + "No need to purge old docker images, only {}% ({}/{}) disk used", + 100 - docker_percentage_remaining, + docker_path_size_info.get_available_space(), + docker_path_size_info.get_total_space(), + )), + )); Ok(()) } diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 5480fb37..55a57eb0 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -1,19 +1,49 @@ use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; -use crate::errors::{CommandError, EngineError}; +use crate::cmd::command::CommandError; +use crate::cmd::docker::DockerError; +use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; -use crate::git; +use crate::io_models::{Context, Listen, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::{Context, Listen, QoveryIdentifier}; -use crate::utilities::get_image_tag; -use git2::{Cred, CredentialType}; +use crate::utilities::compute_image_tag; use std::fmt::{Display, Formatter, Result as FmtResult}; -use std::path::Path; +use std::hash::Hash; +use std::path::PathBuf; +use url::Url; -pub mod docker; +pub mod dockerfile_utils; pub mod local_docker; +#[derive(thiserror::Error, Debug)] +pub enum 
BuildError { + #[error("Cannot build Application {0} due to an invalid config: {1}")] + InvalidConfig(String, String), + + #[error("Cannot build Application {0} due to an error with git: {1}")] + GitError(String, git2::Error), + + #[error("Build of Application {0} have been aborted at user request")] + Aborted(String), + + #[error("Cannot build Application {0} due to an io error: {1} {2}")] + IoError(String, String, std::io::Error), + + #[error("Cannot build Application {0} due to an error with docker: {1}")] + DockerError(String, DockerError), + + #[error("Cannot build Application {0} due to an error with buildpack: {1}")] + BuildpackError(String, CommandError), +} + +pub fn to_engine_error(event_details: EventDetails, err: BuildError) -> EngineError { + match err { + BuildError::Aborted(_) => EngineError::new_task_cancellation_requested(event_details), + _ => EngineError::new_build_error(event_details, err), + } +} + pub trait BuildPlatform: ToTransmitter + Listen { fn context(&self) -> &Context; fn kind(&self) -> Kind; @@ -22,15 +52,7 @@ pub trait BuildPlatform: ToTransmitter + Listen { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), EngineError>; - fn has_cache(&self, build: &Build) -> Result; - fn build( - &self, - build: Build, - force_build: bool, - is_task_canceled: &dyn Fn() -> bool, - ) -> Result; - fn build_error(&self, build: Build) -> Result; + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError>; fn logger(&self) -> Box; fn get_event_details(&self) -> EventDetails { let context = self.context(); @@ -49,70 +71,21 @@ pub trait BuildPlatform: ToTransmitter + Listen { pub struct Build { pub git_repository: GitRepository, pub image: Image, - pub options: BuildOptions, + pub environment_variables: BTreeMap, + pub disable_cache: bool, } impl Build { - pub fn to_previous_build

(&self, clone_repo_into_dir: P) -> Result, CommandError> - where - P: AsRef, - { - let parent_commit_id = git::get_parent_commit_id( - self.git_repository.url.as_str(), - self.git_repository.commit_id.as_str(), - clone_repo_into_dir, - &|_| match &self.git_repository.credentials { - None => vec![], - Some(creds) => vec![( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(creds.login.as_str(), creds.password.as_str()).unwrap(), - )], - }, - ) - .map_err(|err| CommandError::new(err.to_string(), Some("Cannot get parent commit ID.".to_string())))?; - - let parent_commit_id = match parent_commit_id { - None => return Ok(None), - Some(parent_commit_id) => parent_commit_id, - }; - - let mut environment_variables_map = BTreeMap::::new(); - for env in &self.options.environment_variables { - environment_variables_map.insert(env.key.clone(), env.value.clone()); - } - - let mut image = self.image.clone(); - image.tag = get_image_tag( + pub fn compute_image_tag(&mut self) { + self.image.tag = compute_image_tag( &self.git_repository.root_path, &self.git_repository.dockerfile_path, - &environment_variables_map, - &parent_commit_id, + &self.environment_variables, + &self.git_repository.commit_id, ); - - image.commit_id = parent_commit_id.clone(); - - Ok(Some(Build { - git_repository: GitRepository { - url: self.git_repository.url.clone(), - credentials: self.git_repository.credentials.clone(), - ssh_keys: self.git_repository.ssh_keys.clone(), - commit_id: parent_commit_id, - dockerfile_path: self.git_repository.dockerfile_path.clone(), - root_path: self.git_repository.root_path.clone(), - buildpack_language: self.git_repository.buildpack_language.clone(), - }, - image, - options: BuildOptions { - environment_variables: self.options.environment_variables.clone(), - }, - })) } } -pub struct BuildOptions { - pub environment_variables: Vec, -} - #[derive(Clone, Eq, PartialEq, Hash, Debug)] pub struct EnvironmentVariable { pub key: String, @@ -133,12 +106,12 @@ pub 
struct SshKey { } pub struct GitRepository { - pub url: String, + pub url: Url, pub credentials: Option, pub ssh_keys: Vec, pub commit_id: String, - pub dockerfile_path: Option, - pub root_path: String, + pub dockerfile_path: Option, + pub root_path: PathBuf, pub buildpack_language: Option, } @@ -148,23 +121,43 @@ pub struct Image { pub name: String, pub tag: String, pub commit_id: String, - // registry name where the image has been pushed: Optional - pub registry_name: Option, + // registry name where the image has been pushed + pub registry_name: String, // registry docker json config: Optional pub registry_docker_json_config: Option, - // registry secret to pull image: Optional - pub registry_secret: Option, // complete registry URL where the image has been pushed - pub registry_url: Option, + pub registry_url: Url, + pub repository_name: String, } impl Image { - pub fn name_with_tag(&self) -> String { - format!("{}:{}", self.name, self.tag) + pub fn registry_host(&self) -> &str { + self.registry_url.host_str().unwrap() + } + pub fn repository_name(&self) -> &str { + &self.repository_name + } + pub fn full_image_name_with_tag(&self) -> String { + format!( + "{}/{}:{}", + self.registry_url.host_str().unwrap_or_default(), + self.name, + self.tag + ) } - pub fn name_with_latest_tag(&self) -> String { - format!("{}:latest", self.name) + pub fn full_image_name(&self) -> String { + format!("{}/{}", self.registry_url.host_str().unwrap_or_default(), self.name,) + } + + pub fn name(&self) -> String { + self.name.clone() + } + + pub fn name_without_repository(&self) -> &str { + self.name + .strip_prefix(&format!("{}/", self.repository_name())) + .unwrap_or(&self.name) } } @@ -175,10 +168,10 @@ impl Default for Image { name: "".to_string(), tag: "".to_string(), commit_id: "".to_string(), - registry_name: None, + registry_name: "".to_string(), registry_docker_json_config: None, - registry_secret: None, - registry_url: None, + registry_url: 
Url::parse("https://default.com").unwrap(), + repository_name: "".to_string(), } } } @@ -193,26 +186,8 @@ impl Display for Image { } } -pub struct BuildResult { - pub build: Build, -} - -impl BuildResult { - pub fn new(build: Build) -> Self { - BuildResult { build } - } -} - #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { LocalDocker, } - -type ParentBuild = Build; - -pub enum CacheResult { - MissWithoutParentBuild, - Miss(ParentBuild), - Hit, -} diff --git a/src/cloud_provider/aws/application.rs b/src/cloud_provider/aws/application.rs deleted file mode 100644 index 7768f164..00000000 --- a/src/cloud_provider/aws/application.rs +++ /dev/null @@ -1,452 +0,0 @@ -use tera::Context as TeraContext; - -use crate::build_platform::Image; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm, - Pause, Service, ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::EngineError; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; - -pub struct Application { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - 
total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl Application { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - Application { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - image, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/aws/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for Application {} - -impl ToTransmitter for Application { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id.to_string(), self.name.to_string()) - } -} - -impl Service for Application { - fn context(&self) -> &Context { - 
&self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let mut context = default_tera_context(self, target.kubernetes, target.environment); - let commit_id = self.image().commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - - match &self.image().registry_url { - Some(registry_url) => context.insert("image_name_with_tag", registry_url.as_str()), - None => { - let image_name_with_tag = self.image().name_with_tag(); - - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } - - let environment_variables = self - 
.environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - - match self.image.registry_name.as_ref() { - Some(registry_name) => { - context.insert("is_registry_secret", &true); - context.insert("registry_secret", registry_name); - } - None => { - context.insert("is_registry_secret", &false); - } - }; - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::SC1 => "sc1", - StorageType::ST1 => "st1", - StorageType::GP2 => "gp2", - StorageType::IO1 => "io1", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", 
self.id)) - } -} - -impl Create for Application { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for Application { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { Statefulset } else { Deployment }, - ) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - 
self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for Application { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, true, event_details.clone()) - }) - } -} - -impl Listen for Application { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - SC1, - ST1, - GP2, - IO1, -} diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index e35bcd48..386a0b88 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -16,12 +16,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, 
Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDbAws { context: Context, id: String, action: Action, @@ -37,7 +37,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDbAws { pub fn new( context: Context, id: &str, @@ -53,7 +53,7 @@ impl MongoDB { listeners: Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDbAws { context, action, id: id.to_string(), @@ -92,13 +92,17 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDbAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl Service for MongoDB { +impl Service for MongoDbAws { fn context(&self) -> &Context { &self.context } @@ -119,7 +123,7 @@ impl Service for MongoDB { // https://docs.aws.amazon.com/documentdb/latest/developerguide/limits.html#limits-naming_constraints let prefix = "mongodb"; let max_size = 60 - prefix.len(); // 63 (max DocumentDB) - 3 (k8s statefulset chars) - let mut new_name = format!("{}{}", prefix, self.name().replace("_", "").replace("-", "")); + let mut new_name = format!("{}{}", prefix, self.name().replace('_', "").replace('-', "")); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); } @@ -179,7 +183,7 @@ impl Service for MongoDB { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -187,7 +191,7 @@ impl Service for MongoDB { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(self.is_managed_service(), event_details.clone())? 
+ .matching_correct_version(self.is_managed_service(), event_details)? .matched_version() .to_string(); context.insert("version", &version); @@ -200,10 +204,7 @@ impl Service for MongoDB { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -225,10 +226,7 @@ impl Service for MongoDB { context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -243,19 +241,15 @@ impl Service for MongoDB { } } -impl Database for MongoDB {} +impl Database for MongoDbAws {} -impl ToTransmitter for MongoDB { +impl ToTransmitter for MongoDbAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Helm for MongoDB { +impl Helm for MongoDbAws { fn helm_selector(&self) -> Option { self.selector() } @@ -277,7 +271,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDbAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -287,7 +281,7 @@ impl Terraform for MongoDB { } 
} -impl Create for MongoDB { +impl Create for MongoDbAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -307,12 +301,7 @@ impl Create for MongoDB { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -330,7 +319,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl Pause for MongoDbAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -368,7 +357,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDbAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -405,7 +394,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDbAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -439,10 +428,7 @@ fn get_managed_mongodb_version(requested_version: String) -> Result, } -impl MySQL { +impl MySQLAws { pub fn new( context: Context, id: &str, @@ -93,23 +93,23 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - 
self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for MySQL { +impl Service for MySQLAws { fn context(&self) -> &Context { &self.context } @@ -184,7 +184,7 @@ impl Service for MySQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -200,7 +200,7 @@ impl Service for MySQL { Ok(v) => v, Err(e) => { return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details.clone(), + event_details, "MySQL".to_string(), "parameter_group_family".to_string(), version.matched_version().to_string(), @@ -219,10 +219,7 @@ impl Service for MySQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -244,10 +241,7 @@ impl Service for MySQL { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -262,9 +256,9 @@ impl Service for MySQL { } } -impl Database for MySQL {} +impl Database for MySQLAws {} -impl Helm for MySQL { +impl 
Helm for MySQLAws { fn helm_selector(&self) -> Option { self.selector() } @@ -286,7 +280,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -296,7 +290,7 @@ impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -316,12 +310,7 @@ impl Create for MySQL { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -340,7 +329,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -377,7 +366,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -414,7 +403,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -459,10 +448,7 @@ fn get_managed_mysql_version(requested_version: String) -> Result, } -impl PostgreSQL { +impl PostgreSQLAws { pub fn new( context: Context, id: &str, @@ -54,7 +54,7 @@ impl PostgreSQL { listeners: Listeners, logger: Box, ) -> Self { - PostgreSQL { + PostgreSQLAws { context, action, 
id: id.to_string(), @@ -93,23 +93,23 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgreSQLAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgreSQLAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for PostgreSQL { +impl Service for PostgreSQLAws { fn context(&self) -> &Context { &self.context } @@ -184,7 +184,7 @@ impl Service for PostgreSQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -192,7 +192,7 @@ impl Service for PostgreSQL { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? 
.matched_version() .to_string(); context.insert("version", &version); @@ -205,10 +205,7 @@ impl Service for PostgreSQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_name", self.sanitized_name().as_str()); context.insert("database_db_name", self.name()); @@ -232,10 +229,7 @@ impl Service for PostgreSQL { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -250,9 +244,9 @@ impl Service for PostgreSQL { } } -impl Database for PostgreSQL {} +impl Database for PostgreSQLAws {} -impl Helm for PostgreSQL { +impl Helm for PostgreSQLAws { fn helm_selector(&self) -> Option { self.selector() } @@ -274,7 +268,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { +impl Terraform for PostgreSQLAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -284,7 +278,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgreSQLAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -304,12 +298,7 @@ impl Create for PostgreSQL { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - 
self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -328,7 +317,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgreSQLAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -366,7 +355,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgreSQLAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -404,7 +393,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgreSQLAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -451,10 +440,7 @@ fn get_managed_postgres_version(requested_version: String) -> Result, } -impl Redis { +impl RedisAws { pub fn new( context: Context, id: &str, @@ -90,23 +90,23 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisAws { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisAws { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for Redis { +impl Service for RedisAws { fn context(&self) -> &Context { &self.context } @@ -127,7 +127,7 @@ impl Service for Redis { // https://aws.amazon.com/about-aws/whats-new/2019/08/elasticache_supports_50_chars_cluster_name let prefix = "redis"; let max_size = 47 - prefix.len(); // 
50 (max Elasticache ) - 3 (k8s statefulset chars) - let mut new_name = self.name().replace("_", "").replace("-", ""); + let mut new_name = self.name().replace('_', "").replace('-', ""); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); @@ -188,7 +188,7 @@ impl Service for Redis { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -204,7 +204,7 @@ impl Service for Redis { "default.redis6.x" } else { return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details.clone(), + event_details, "Elasicache".to_string(), "database_elasticache_parameter_group_name".to_string(), format!("default.redis{}", version), @@ -225,10 +225,7 @@ impl Service for Redis { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -248,10 +245,7 @@ impl Service for Redis { context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -266,9 +260,9 @@ impl Service for Redis { } } -impl Database for Redis {} 
+impl Database for RedisAws {} -impl Helm for Redis { +impl Helm for RedisAws { fn helm_selector(&self) -> Option { self.selector() } @@ -290,7 +284,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisAws { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/aws/services/common", self.context.lib_root_dir()) } @@ -300,7 +294,7 @@ impl Terraform for Redis { } } -impl Create for Redis { +impl Create for RedisAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -320,12 +314,7 @@ impl Create for Redis { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -343,7 +332,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisAws { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -381,7 +370,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -418,7 +407,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -448,10 +437,7 @@ fn get_managed_redis_version(requested_version: String) -> Result String { mod tests_aws_databases_parameters { use crate::cloud_provider::aws::databases::utilities::get_parameter_group_from_version; use 
crate::cloud_provider::utilities::VersionsNumber; - use crate::models::DatabaseKind; + use crate::io_models::DatabaseKind; use std::str::FromStr; #[test] diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 78279ba8..081a9c7d 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -68,7 +68,7 @@ pub fn aws_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -79,21 +79,18 @@ pub fn aws_helm_charts( let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; let prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -153,7 +150,7 @@ pub fn aws_helm_charts( ..Default::default() }, }; - let 
is_cni_old_installed_version = match aws_vpc_cni_chart.is_cni_old_installed_version(kubernetes_config, &envs) { + let is_cni_old_installed_version = match aws_vpc_cni_chart.is_cni_old_installed_version(kubernetes_config, envs) { Ok(x) => x, Err(e) => return Err(e), }; @@ -478,6 +475,10 @@ pub fn aws_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), @@ -659,11 +660,11 @@ datasources: accessKey: '{}' secretKey: '{}' ", - prometheus_internal_url.clone(), + prometheus_internal_url, &loki.chart_info.name, - loki_namespace.to_string(), + loki_namespace, &loki.chart_info.name, - loki_namespace.to_string(), + loki_namespace, chart_config_prerequisites.region.clone(), qovery_terraform_config.aws_iam_cloudwatch_key, qovery_terraform_config.aws_iam_cloudwatch_secret, @@ -1262,8 +1263,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", stdout, out), + &mut |out| stderr = format!("{}\n{}", stderr, out), )?; let args = vec![ @@ -1281,8 +1282,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", stdout, out), + &mut |out| stderr = format!("{}\n{}", stderr, out), )?; let args = vec![ @@ -1300,8 +1301,8 @@ impl HelmChart for AwsVpcCniChart { kubectl_exec_with_output( args.clone(), environment_variables.clone(), - |out| stdout = format!("{}\n{}", stdout, out), - |out| stderr = format!("{}\n{}", stderr, out), + &mut |out| stdout = format!("{}\n{}", 
stdout, out), + &mut |out| stderr = format!("{}\n{}", stderr, out), )?; Ok(()) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index d63d6a2c..09e93e87 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -38,11 +38,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString, }; +use crate::logger::Logger; use crate::object_storage::s3::S3; use crate::object_storage::ObjectStorage; use crate::string::terraform_list_format; @@ -154,9 +154,9 @@ impl EKS { ) -> Result { let event_details = EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(region.to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id.to_string(), name.to_string()), @@ -170,9 +170,9 @@ impl EKS { Ok(x) => aws_zones.push(x), Err(e) => { return Err(EngineError::new_unsupported_zone( - event_details.clone(), + event_details, region.to_string(), - zone.to_string(), + zone, CommandError::new_from_safe_message(e.to_string()), )) } @@ -181,13 +181,10 @@ impl EKS { for node_group in &nodes_groups { if let Err(e) = 
AwsInstancesType::from_str(node_group.instance_type.as_str()) { - let err = EngineError::new_unsupported_instance_type( - event_details.clone(), - node_group.instance_type.as_str(), - e, - ); + let err = + EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -198,8 +195,8 @@ impl EKS { context.clone(), "s3-temp-id".to_string(), "default-s3".to_string(), - cloud_provider.access_key_id().clone(), - cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), region.clone(), true, context.resource_expiration_in_seconds(), @@ -239,7 +236,7 @@ impl EKS { .dns_provider .resolvers() .iter() - .map(|x| format!("{}", x.clone().to_string())) + .map(|x| format!("{}", x.clone())) .collect(); terraform_list_format(managed_dns_resolvers) @@ -258,7 +255,7 @@ impl EKS { &self, event_details: EventDetails, zone_name: &str, - subnet_block: &Vec, + subnet_block: &[String], ) -> Result { if subnet_block.len() % 2 == 1 { return Err(EngineError::new_subnets_count_is_not_even( @@ -276,13 +273,10 @@ impl EKS { event_details: EventDetails, replicas_count: u32, ) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), + )); let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let selector = "cluster-autoscaler-aws-cluster-autoscaler"; let namespace = "kube-system"; @@ -348,10 +342,7 @@ impl EKS { } VpcQoveryNetworkMode::WithoutNatGateways => {} }; - context.insert( - "vpc_qovery_network_mode", - 
&self.options.vpc_qovery_network_mode.to_string(), - ); + context.insert("vpc_qovery_network_mode", &self.options.vpc_qovery_network_mode.to_string()); let rds_zone_a_subnet_blocks = format_ips(&self.options.rds_zone_a_subnet_blocks); let rds_zone_b_subnet_blocks = format_ips(&self.options.rds_zone_b_subnet_blocks); @@ -386,29 +377,17 @@ impl EKS { context.insert("organization_id", self.cloud_provider.organization_id()); context.insert("qovery_api_url", &qovery_api_url); - context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); context.insert("test_cluster", &self.context.is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } context.insert("force_upgrade", &self.context.requires_forced_upgrade()); // Qovery features - context.insert( - "log_history_enabled", - &self.context.is_feature_enabled(&Features::LogsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); context.insert( "metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory), @@ -425,14 +404,8 @@ impl EKS { context.insert("managed_dns", &managed_dns_list); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - 
&managed_dns_domains_terraform_format, - ); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); context.insert( "managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format, @@ -458,7 +431,7 @@ impl EKS { // Vault context.insert("vault_auth_method", "none"); - if let Some(_) = env::var_os("VAULT_ADDR") { + if env::var_os("VAULT_ADDR").is_some() { // select the correct used method match env::var_os("VAULT_ROLE_ID") { Some(role_id) => { @@ -467,20 +440,17 @@ impl EKS { match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details.clone(), - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { - if let Some(_) = env::var_os("VAULT_TOKEN") { + if env::var_os("VAULT_TOKEN").is_some() { context.insert("vault_auth_method", "token") } } @@ -525,7 +495,7 @@ impl EKS { // AWS - EKS context.insert("aws_availability_zones", &aws_zones); - context.insert("eks_cidr_subnet", &eks_cidr_subnet.clone()); + context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); @@ -558,18 +528,9 @@ impl EKS { // AWS - Elasticsearch context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); - context.insert( - "elasticsearch_zone_a_subnet_blocks", - &elasticsearch_zone_a_subnet_blocks, - ); - context.insert( - "elasticsearch_zone_b_subnet_blocks", - &elasticsearch_zone_b_subnet_blocks, 
- ); - context.insert( - "elasticsearch_zone_c_subnet_blocks", - &elasticsearch_zone_c_subnet_blocks, - ); + context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); + context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); + context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); // grafana credentials context.insert("grafana_admin_user", self.options.grafana_admin_user.as_str()); @@ -590,13 +551,10 @@ impl EKS { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); let listeners_helper = ListenersHelper::new(&self.listeners); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), + )); self.send_to_customer( format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -616,28 +574,18 @@ impl EKS { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard 
deployment.".to_string(), + )))); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -648,23 +596,17 @@ impl EKS { self.cloud_provider.access_key_id().as_str(), self.cloud_provider.secret_access_key().as_str(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Role {} is already present, no need to create", - role.role_name - )), - ), - ), - Err(e) => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), - None, - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Role {} is already present, no need to create", + role.role_name + )), + )), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), + None, + )), } } @@ -679,9 +621,9 @@ impl EKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -690,24 +632,20 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - 
crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), + )); self.send_to_customer( format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, @@ -721,16 +659,13 @@ impl EKS { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -740,21 +675,15 @@ impl EKS { } } } - Err(e) => self.logger().log( - LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - ), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; // terraform deployment dedicated to cloud resources if let Err(e) = 
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } // kubernetes helm deployments on the cluster @@ -776,11 +705,11 @@ impl EKS { cluster_id: self.id.clone(), cluster_long_id: self.long_id, region: self.region(), - cluster_name: self.cluster_name().to_string(), + cluster_name: self.cluster_name(), cloud_provider: "aws".to_string(), test_cluster: self.context.is_test_cluster(), - aws_access_key_id: self.cloud_provider.access_key_id().to_string(), - aws_secret_access_key: self.cloud_provider.secret_access_key().to_string(), + aws_access_key_id: self.cloud_provider.access_key_id(), + aws_secret_access_key: self.cloud_provider.secret_access_key(), vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(), qovery_engine_location: self.get_engine_location(), ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory), @@ -796,24 +725,21 @@ impl EKS { disable_pleco: self.context.disable_pleco(), }; - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); let helm_charts_to_deploy = aws_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, Some(&temp_dir), - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, ) .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -826,39 
+752,29 @@ impl EKS { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("EKS.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("EKS.create_error() called.".to_string()), + )); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new(ok_line, None)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -868,13 +784,10 @@ impl EKS { } fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - 
self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -888,13 +801,10 @@ impl EKS { &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -911,9 +821,9 @@ impl EKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -922,13 +832,12 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -946,18 +855,14 @@ impl EKS { tf_workers_resources_name } 
Err(e) => { - let error = EngineError::new_terraform_state_does_not_exist(event_details.clone(), e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes( - event_details.clone(), - None, - )); + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); } let kubernetes_config_file_path = self.get_kubeconfig_file_path()?; @@ -983,7 +888,7 @@ impl EKS { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { let safe_message = "Error while looking at the API metric value"; - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.to_string()), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); } _ => {} } @@ -1004,17 +909,17 @@ impl EKS { match wait_engine_job_finish { Ok(_) => { - self.logger().log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. 
}) => { return Err(error) } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) } } } - false => self.logger().log(LogLevel::Warning, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("The Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), } } @@ -1028,39 +933,28 @@ impl EKS { format!("Pausing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - event_details.clone(), - EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), + )); match terraform_exec(temp_dir.as_str(), terraform_args) { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); Ok(()) } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )), + Err(e) => 
Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), } } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), + )); Ok(()) } @@ -1074,13 +968,10 @@ impl EKS { format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1093,9 +984,9 @@ impl EKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1104,13 +995,12 @@ impl EKS { // this is due to the required dependencies of lib/aws/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - 
bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1119,13 +1009,10 @@ impl EKS { Ok(x) => x, Err(e) => { let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(safe_message.to_string(), Some(e.message())), + )); skip_kubernetes_step = true; "".to_string() @@ -1140,27 +1027,19 @@ impl EKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; if !skip_kubernetes_step { @@ -1170,10 +1049,10 @@ impl EKS { self.name(), self.id() ); - self.logger().log( - LogLevel::Info, - 
EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( @@ -1186,13 +1065,10 @@ impl EKS { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( @@ -1200,28 +1076,22 @@ impl EKS { namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -1232,13 +1102,10 @@ impl EKS { "Error while getting all namespaces for 
Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1248,10 +1115,8 @@ impl EKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion let helm = Helm::new( @@ -1260,7 +1125,7 @@ impl EKS { ) .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion @@ -1271,51 +1136,39 @@ impl EKS { self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - 
event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( @@ -1324,90 +1177,64 @@ impl EKS { self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", 
qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); - match helm.list_release(None, &vec![]) { + match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), 
EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1420,34 +1247,28 @@ impl EKS { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); Ok(()) } Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), + )); Ok(()) } @@ -1522,7 +1343,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1536,7 +1357,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1555,13 +1376,10 @@ impl Kubernetes for EKS { .as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1574,21 +1392,13 @@ impl Kubernetes for EKS { match &kubernetes_upgrade_status.required_upgrade_on { Some(KubernetesNodesType::Masters) => { self.send_to_customer( - format!( - "Start upgrading process for master nodes on {}/{}", - 
self.name(), - self.id() - ) - .as_str(), + format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), + )); // AWS requires the upgrade to be done in 2 steps (masters, then workers) // use the current kubernetes masters' version for workers, in order to avoid migration in one step @@ -1608,9 +1418,9 @@ impl Kubernetes for EKS { context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1622,9 +1432,9 @@ impl Kubernetes for EKS { common_charts_temp_dir.as_str(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1633,63 +1443,44 @@ impl Kubernetes for EKS { format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => { self.send_to_customer( - format!( - "Kubernetes {} master nodes have been successfully upgraded", - self.name() - ) - 
.as_str(), + format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes master nodes have been successfully upgraded.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes master nodes have been successfully upgraded.".to_string(), ), - ); + )); } Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } } Some(KubernetesNodesType::Workers) => { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), ), - ); + )); } None => { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), ), - ); + )); return Ok(()); } } @@ -1701,7 +1492,7 @@ impl Kubernetes for EKS { self.cloud_provider().credentials_environment_variables(), Stage::Infrastructure(InfrastructureStep::Upgrade), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1709,20 +1500,13 @@ impl Kubernetes for EKS { // 
Upgrade worker nodes // self.send_to_customer( - format!( - "Preparing workers nodes for upgrade for Kubernetes cluster {}", - self.name() - ) - .as_str(), + format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); // disable cluster autoscaler to avoid interfering with AWS upgrade procedure context.insert("enable_cluster_autoscaler", &false); @@ -1737,9 +1521,9 @@ impl Kubernetes for EKS { context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1752,9 +1536,9 @@ impl Kubernetes for EKS { crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1763,13 +1547,10 @@ impl Kubernetes for EKS { format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), + )); // Disable cluster autoscaler deployment let _ = 
self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; @@ -1777,36 +1558,26 @@ impl Kubernetes for EKS { match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => { self.send_to_customer( - format!( - "Kubernetes {} workers nodes have been successfully upgraded", - self.name() - ) - .as_str(), + format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes workers nodes have been successfully upgraded.".to_string(), - ), + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes workers nodes have been successfully upgraded.".to_string(), ), - ); + )); } Err(e) => { // enable cluster autoscaler deployment let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } // enable cluster autoscaler deployment - self.set_cluster_autoscaler_replicas(event_details.clone(), 1) + self.set_cluster_autoscaler_replicas(event_details, 1) } #[named] @@ -1817,7 +1588,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1831,7 +1602,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ -1845,7 +1616,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); 
send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1859,7 +1630,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1873,7 +1644,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -1887,7 +1658,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -1901,7 +1672,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -1915,7 +1686,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -1971,7 +1742,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -1999,7 +1770,7 @@ impl Kubernetes for EKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) diff --git a/src/cloud_provider/aws/kubernetes/node.rs b/src/cloud_provider/aws/kubernetes/node.rs index 8f0bdd0f..2a88784f 100644 --- a/src/cloud_provider/aws/kubernetes/node.rs +++ b/src/cloud_provider/aws/kubernetes/node.rs @@ -10,6 +10,7 @@ pub enum AwsInstancesType { T2Xlarge, // 4 cores 16Gb RAM T3Large, // 2 cores 8Gb RAM T3Xlarge, // 4 cores 16Gb RAM + T3aMedium, // 2 cores 4Gb RAM T3aLarge, // 2 cores 8Gb RAM T3a2xlarge, // 8 cores 32Gb 
RAM } @@ -21,6 +22,7 @@ impl InstanceType for AwsInstancesType { AwsInstancesType::T2Xlarge => "t2x.large", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", + AwsInstancesType::T3aMedium => "t3a.medium", AwsInstancesType::T3aLarge => "t3a.large", AwsInstancesType::T3a2xlarge => "t3a.2xlarge", } @@ -35,6 +37,7 @@ impl AwsInstancesType { AwsInstancesType::T2Xlarge => "t2x.large", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", + AwsInstancesType::T3aMedium => "t3a.medium", AwsInstancesType::T3aLarge => "t3a.large", AwsInstancesType::T3a2xlarge => "t3a.2xlarge", } @@ -48,6 +51,7 @@ impl fmt::Display for AwsInstancesType { AwsInstancesType::T2Xlarge => write!(f, "t2x.large"), AwsInstancesType::T3Large => write!(f, "t3.large"), AwsInstancesType::T3Xlarge => write!(f, "t3.xlarge"), + AwsInstancesType::T3aMedium => write!(f, "t3a.medium"), AwsInstancesType::T3aLarge => write!(f, "t3a.large"), AwsInstancesType::T3a2xlarge => write!(f, "t3a.2xlarge"), } @@ -63,11 +67,12 @@ impl FromStr for AwsInstancesType { "t2x.large" => Ok(AwsInstancesType::T2Xlarge), "t3.large" => Ok(AwsInstancesType::T3Large), "t3.xlarge" => Ok(AwsInstancesType::T3Xlarge), + "t3a.medium" => Ok(AwsInstancesType::T3aMedium), "t3a.large" => Ok(AwsInstancesType::T3aLarge), "t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index e0f7e2ea..7dba78d6 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -9,10 +9,9 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY}; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, 
ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; use crate::runtime::block_on; -pub mod application; pub mod databases; pub mod kubernetes; pub mod regions; @@ -58,12 +57,7 @@ impl AWS { } pub fn credentials(&self) -> StaticProvider { - StaticProvider::new( - self.access_key_id.to_string(), - self.secret_access_key.to_string(), - None, - None, - ) + StaticProvider::new(self.access_key_id.to_string(), self.secret_access_key.to_string(), None, None) } pub fn client(&self) -> Client { @@ -115,11 +109,7 @@ impl CloudProvider for AWS { match s { Ok(_x) => Ok(()), - Err(_) => { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - } + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials(event_details)), } } diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index bc6aa954..5a719c07 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -1,6 +1,6 @@ use crate::cloud_provider::aws::regions::AwsZones::*; use crate::cloud_provider::aws::regions::RegionAndZoneErrors::*; -use crate::models::ToTerraformString; +use crate::io_models::ToTerraformString; use serde::{Deserialize, Serialize}; use std::fmt; use std::fmt::{Display, Formatter}; @@ -102,7 +102,7 @@ pub enum AwsZones { impl ToTerraformString for AwsZones { fn to_terraform_format_string(&self) -> String { - format!("\"{}\"", self.to_string()) + format!("\"{}\"", self) } } @@ -171,11 +171,6 @@ impl AwsRegion { self } - pub fn to_string(&self) -> String { - let enum_name = format!("{}", self); - format!("{}", enum_name) - } - pub fn to_aws_format(&self) -> String { match self { AwsRegion::UsEast1 => "us-east-1", @@ -303,8 +298,46 @@ impl Display for RegionAndZoneErrors { } impl AwsZones { - pub fn to_string(&self) -> String { - match self { + pub fn 
from_string(zone: String) -> Result { + // create tmp region from zone and get zone name (one letter) + let sanitized_zone_name = zone.to_lowercase().replace('-', "").replace('_', ""); + let mut sanitized_region = sanitized_zone_name.clone(); + sanitized_region.pop(); + + // ensure the region exists + let region = match AwsRegion::from_str(&sanitized_region) { + Ok(x) => x, + Err(_) => return Err(RegionNotFound), + }; + if region.to_string().to_lowercase() != sanitized_region { + return Err(RegionNotFound); + }; + + // check if the zone is currently supported + for zone in region.get_zones() { + if zone.to_string().replace('-', "") == sanitized_zone_name { + return Ok(zone); + } + } + + Err(ZoneNotSupported) + } + + pub fn get_region(&self) -> String { + let zone = self.to_string(); + zone[0..zone.len() - 1].to_string() + } +} + +impl fmt::Display for AwsRegion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Display for AwsZones { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let str = match self { UsEast1A => "us-east-1a", UsEast1B => "us-east-1b", UsEast1C => "us-east-1c", @@ -371,50 +404,9 @@ impl AwsZones { SaEast1A => "sa-east-1a", SaEast1B => "sa-east-1b", SaEast1C => "sa-east-1c", - } - .to_string() - } - - pub fn from_string(zone: String) -> Result { - // create tmp region from zone and get zone name (one letter) - let sanitized_zone_name = zone.to_lowercase().replace("-", "").replace("_", ""); - let mut sanitized_region = sanitized_zone_name.clone(); - sanitized_region.pop(); - - // ensure the region exists - let region = match AwsRegion::from_str(&sanitized_region) { - Ok(x) => x, - Err(_) => return Err(RegionNotFound), - }; - if region.to_string().to_lowercase() != sanitized_region { - return Err(RegionNotFound); }; - // check if the zone is currently supported - for zone in region.get_zones() { - if zone.to_string().replace("-", "") == sanitized_zone_name { - return Ok(zone); - } - } 
- - Err(ZoneNotSupported) - } - - pub fn get_region(&self) -> String { - let zone = self.to_string(); - zone[0..zone.len() - 1].to_string() - } -} - -impl fmt::Display for AwsRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl fmt::Display for AwsZones { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) + write!(f, "{}", str) } } diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index fd4aada7..769180f4 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -4,7 +4,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -12,11 +12,11 @@ use crate::cmd::helm; use crate::cmd::helm::{to_engine_error, Timeout}; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use crate::logger::Logger; use ::function_name::named; -pub struct Router { +pub struct RouterAws { context: Context, id: String, name: String, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterAws { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterAws { 
context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterAws { fn context(&self) -> &Context { &self.context } @@ -133,8 +133,8 @@ impl Service for Router { let mut context = default_tera_context(self, kubernetes, environment); let applications = environment - .stateless_services - .iter() + .stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -154,7 +154,7 @@ impl Service for Router { let route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) @@ -167,8 +167,6 @@ impl Service for Router { _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); // autoscaler @@ -194,27 +192,21 @@ impl Service for Router { Some(hostname) => context.insert("external_ingress_hostname_default", hostname.as_str()), None => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe( - "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), - ), + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe( + "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), - ); + )); } }, _ => { // FIXME really? 
// TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), + )); } } @@ -250,7 +242,7 @@ impl Service for Router { } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterAws { fn domains(&self) -> Vec<&str> { let mut _domains = vec![self.default_domain.as_str()]; @@ -266,7 +258,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterAws { fn helm_selector(&self) -> Option { self.selector() } @@ -288,7 +280,7 @@ impl Helm for Router { } } -impl Listen for Router { +impl Listen for RouterAws { fn listeners(&self) -> &Listeners { &self.listeners } @@ -298,15 +290,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterAws { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Router { +impl ToTransmitter for RouterAws { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterAws { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -334,9 +330,9 @@ impl Create for Router { crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -361,7 +357,7 @@ impl Create for Router 
{ self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } @@ -384,19 +380,16 @@ impl Create for Router { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } @@ -422,7 +415,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterAws { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -456,7 +449,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterAws { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -468,7 +461,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ -486,6 +479,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs deleted file mode 100644 index b7dd0d2b..00000000 --- a/src/cloud_provider/digitalocean/application.rs +++ 
/dev/null @@ -1,535 +0,0 @@ -use tera::Context as TeraContext; - -use crate::build_platform::Image; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Create, Delete, Helm, Pause, Service, ServiceType, - StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; -use std::fmt; -use std::str::FromStr; - -pub struct Application { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl Application { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - Application { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - 
cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - image, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - self.storage.len() > 0 - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name, self.id), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/digitalocean/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for Application {} - -impl ToTransmitter for Application { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for Application { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - 
self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - - match &self.image.registry_url { - Some(registry_url) => context.insert("image_name_with_tag", registry_url.as_str()), - None => { - let image_name_with_tag = self.image.name_with_tag(); - - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), 
- }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - - if self.image.registry_name.is_some() { - context.insert("is_registry_secret", &true); - context.insert( - "registry_secret", - &"do-container-registry-secret-for-cluster".to_string(), - ); - } else { - context.insert("is_registry_secret", &false); - }; - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::Standard => "do-block-storage", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = storage.len() > 0; - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for Application { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> 
Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for Application { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { Statefulset } else { Deployment }, - ) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for Application { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - 
#[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, true, event_details.clone()) - }) - } -} - -impl Listen for Application { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - Standard, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum DoRegion { - NewYorkCity1, - NewYorkCity2, - NewYorkCity3, - Amsterdam2, - Amsterdam3, - SanFrancisco1, - SanFrancisco2, - SanFrancisco3, - Singapore, - London, - Frankfurt, - Toronto, - Bangalore, -} - -impl DoRegion { - pub fn as_str(&self) -> &str { - match self { - DoRegion::NewYorkCity1 => "nyc1", - DoRegion::NewYorkCity2 => "nyc2", - DoRegion::NewYorkCity3 => "nyc3", - DoRegion::Amsterdam2 => "ams2", - DoRegion::Amsterdam3 => "ams3", - DoRegion::SanFrancisco1 => "sfo1", - DoRegion::SanFrancisco2 => "sfo2", - DoRegion::SanFrancisco3 => "sfo3", - DoRegion::Singapore => "sgp1", - DoRegion::London => "lon1", - DoRegion::Frankfurt => "fra1", - DoRegion::Toronto => "tor1", - DoRegion::Bangalore => "blr1", - } - } -} - -impl fmt::Display for DoRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - DoRegion::NewYorkCity1 => write!(f, "nyc1"), - DoRegion::NewYorkCity2 => write!(f, "nyc2"), - DoRegion::NewYorkCity3 => write!(f, "nyc3"), - DoRegion::Amsterdam2 => write!(f, "ams2"), - DoRegion::Amsterdam3 => write!(f, "ams3"), - DoRegion::SanFrancisco1 => write!(f, "sfo1"), - DoRegion::SanFrancisco2 => write!(f, "sfo2"), - 
DoRegion::SanFrancisco3 => write!(f, "sfo3"), - DoRegion::Singapore => write!(f, "sgp1"), - DoRegion::London => write!(f, "lon1"), - DoRegion::Frankfurt => write!(f, "fra1"), - DoRegion::Toronto => write!(f, "tor1"), - DoRegion::Bangalore => write!(f, "blr1"), - } - } -} - -impl FromStr for DoRegion { - type Err = CommandError; - - fn from_str(s: &str) -> Result { - match s { - "nyc1" => Ok(DoRegion::NewYorkCity1), - "nyc2" => Ok(DoRegion::NewYorkCity2), - "nyc3" => Ok(DoRegion::NewYorkCity3), - "ams2" => Ok(DoRegion::Amsterdam2), - "ams3" => Ok(DoRegion::Amsterdam3), - "sfo1" => Ok(DoRegion::SanFrancisco1), - "sfo2" => Ok(DoRegion::SanFrancisco2), - "sfo3" => Ok(DoRegion::SanFrancisco3), - "sgp1" => Ok(DoRegion::Singapore), - "lon1" => Ok(DoRegion::London), - "fra1" => Ok(DoRegion::Frankfurt), - "tor1" => Ok(DoRegion::Toronto), - "blr1" => Ok(DoRegion::Bangalore), - _ => { - return Err(CommandError::new_from_safe_message(format!( - "`{}` region is not supported", - s - ))); - } - } - } -} diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index b05ca2a7..1c179c2f 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDo { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl MongoDB { listeners: 
Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDo { context, action, id: id.to_string(), @@ -67,7 +67,7 @@ impl MongoDB { fn matching_correct_version(&self, event_details: EventDetails) -> Result { check_service_version( - get_self_hosted_mongodb_version(self.version().clone()), + get_self_hosted_mongodb_version(self.version()), self, event_details, self.logger(), @@ -83,23 +83,23 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MongoDB { +impl ToTransmitter for MongoDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for MongoDB { +impl Service for MongoDo { fn context(&self) -> &Context { &self.context } @@ -171,7 +171,7 @@ impl Service for MongoDB { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -179,7 +179,7 @@ impl Service for MongoDB { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); context.insert("version", &version); @@ -192,10 +192,7 @@ impl Service for MongoDB { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -213,27 +210,24 @@ impl Service for MongoDB { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for MongoDB {} +impl Database for MongoDo {} -impl Helm for MongoDB { +impl Helm for MongoDo { fn helm_selector(&self) -> Option { self.selector() } @@ -255,7 +249,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -265,7 +259,7 @@ impl Terraform for MongoDB { } } -impl Create for MongoDB { +impl Create for MongoDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -303,7 +297,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl 
Pause for MongoDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -341,7 +335,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -379,7 +373,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index 9ab89351..5bffb434 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MySQL { +pub struct MySQLDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MySQL { logger: Box, } -impl MySQL { +impl MySQLDo { pub fn new( context: Context, id: &str, @@ -83,23 +83,23 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - 
self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for MySQL { +impl Service for MySQLDo { fn context(&self) -> &Context { &self.context } @@ -171,7 +171,7 @@ impl Service for MySQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -179,7 +179,7 @@ impl Service for MySQL { context.insert("namespace", environment.namespace()); let version = &self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); context.insert("version", &version); @@ -192,10 +192,7 @@ impl Service for MySQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -213,10 +210,7 @@ impl Service for MySQL { context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -231,9 +225,9 @@ impl Service for MySQL { } } -impl Database for MySQL {} +impl Database for MySQLDo {} -impl Helm for MySQL { +impl Helm for MySQLDo { fn helm_selector(&self) 
-> Option { self.selector() } @@ -255,7 +249,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -265,7 +259,7 @@ impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -306,7 +300,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -344,7 +338,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -383,7 +377,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index d539ee5d..2b47a106 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct 
PostgreSQL { +pub struct PostgresDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct PostgreSQL { logger: Box, } -impl PostgreSQL { +impl PostgresDo { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl PostgreSQL { listeners: Listeners, logger: Box, ) -> Self { - PostgreSQL { + PostgresDo { context, action, id: id.to_string(), @@ -83,23 +83,23 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgresDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgresDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for PostgreSQL { +impl Service for PostgresDo { fn context(&self) -> &Context { &self.context } @@ -171,7 +171,7 @@ impl Service for PostgreSQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -179,7 +179,7 @@ impl Service for PostgreSQL { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); context.insert("version", &version); @@ -192,10 +192,7 @@ impl Service for PostgreSQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name()); context.insert("database_login", self.options.login.as_str()); @@ -215,10 +212,7 @@ impl Service for PostgreSQL { context.insert("delete_automated_backups", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -233,9 +227,9 @@ impl Service for PostgreSQL { } } -impl Database for PostgreSQL {} +impl Database for PostgresDo {} -impl Helm for PostgreSQL { +impl Helm for PostgresDo { fn helm_selector(&self) -> Option { self.selector() } @@ -257,7 +251,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { +impl Terraform for PostgresDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -267,7 +261,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgresDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -307,7 +301,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgresDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -345,7 +339,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgresDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -385,7 +379,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgresDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 98284ca0..a06684d9 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct Redis { +pub struct RedisDo { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct Redis { logger: Box, } -impl Redis { +impl RedisDo { pub fn new( context: Context, id: &str, @@ -83,23 +83,23 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisDo { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisDo { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), 
self.service_type().to_string(), self.name().to_string()) } } -impl Service for Redis { +impl Service for RedisDo { fn context(&self) -> &Context { &self.context } @@ -177,7 +177,7 @@ impl Service for Redis { ); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); @@ -192,10 +192,7 @@ impl Service for Redis { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -212,27 +209,24 @@ impl Service for Redis { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for Redis {} +impl Database for RedisDo {} -impl Helm for Redis { +impl Helm for RedisDo { fn helm_selector(&self) -> Option { self.selector() } @@ -254,7 +248,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisDo { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/digitalocean/services/common", self.context.lib_root_dir()) } @@ -264,7 +258,7 @@ impl Terraform for Redis { } } -impl Create for 
Redis { +impl Create for RedisDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -304,7 +298,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisDo { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -341,7 +335,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -380,7 +374,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisDo { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/digitalocean/do_api_common.rs b/src/cloud_provider/digitalocean/do_api_common.rs index c98ab564..69471800 100644 --- a/src/cloud_provider/digitalocean/do_api_common.rs +++ b/src/cloud_provider/digitalocean/do_api_common.rs @@ -42,15 +42,15 @@ pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Res api_type ); return Err(CommandError::new( - format!("{}, response: {:?}", message_safe.to_string(), response), - Some(message_safe.to_string()), + format!("{}, response: {:?}", message_safe, response), + Some(message_safe), )); } _ => { let message_safe = format!("Unknown status code received from Digital Ocean Kubernetes API while retrieving {} information.", api_type); return Err(CommandError::new( - format!("{}, response: {:?}", message_safe.to_string(), response), - Some(message_safe.to_string()), + format!("{}, response: {:?}", message_safe, response), + Some(message_safe), )); } } diff --git a/src/cloud_provider/digitalocean/kubernetes/cidr.rs b/src/cloud_provider/digitalocean/kubernetes/cidr.rs index c5a46fbc..ff2ac779 
100644 --- a/src/cloud_provider/digitalocean/kubernetes/cidr.rs +++ b/src/cloud_provider/digitalocean/kubernetes/cidr.rs @@ -17,17 +17,11 @@ pub struct DoVpc { pub fn get_used_cidr_on_region(token: &str) { let mut output_from_cli = String::new(); - let mut cmd = QoveryCommand::new("doctl", &vec!["vpcs", "list", "--output", "json", "-t", token], &vec![]); - let _ = cmd.exec_with_output( - |r_out| output_from_cli.push_str(&r_out), - |r_err| { - error!( - "DOCTL CLI error from cmd inserted, please check vpcs list command{}", - r_err - ) - }, - ); + let mut cmd = QoveryCommand::new("doctl", &["vpcs", "list", "--output", "json", "-t", token], &[]); + let _ = cmd.exec_with_output(&mut |r_out| output_from_cli.push_str(&r_out), &mut |r_err| { + error!("DOCTL CLI error from cmd inserted, please check vpcs list command{}", r_err) + }); let buff = output_from_cli.borrow(); - let _array: Vec = serde_json::from_str(&buff).expect("JSON is not well-formatted"); + let _array: Vec = serde_json::from_str(buff).expect("JSON is not well-formatted"); } diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index e07688f6..3f393de8 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -27,7 +27,7 @@ pub fn get_doks_info_from_name( Err(e) => { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; return Err(CommandError::new( - format!("{}, error: {}", safe_message.to_string(), e.to_string()), + format!("{}, error: {}", safe_message, e), Some(safe_message.to_string()), )); } @@ -51,7 +51,7 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result { let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; return Err(CommandError::new( - format!("{}, error: {}", safe_message.to_string(), e.to_string()), + format!("{}, error: {}", safe_message, e), 
Some(safe_message.to_string()), )); } @@ -60,7 +60,7 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result, + doks_versions: &[KubernetesVersion], wished_version: &str, ) -> Result, CommandError> { let wished_k8s_version = VersionsNumber::from_str(wished_version)?; @@ -76,10 +76,45 @@ fn get_do_kubernetes_latest_slug_version( Err(CommandError::new_from_safe_message(format!( "DOKS version `{}` is not supported.", - wished_k8s_version.to_string() + wished_k8s_version ))) } +pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Result, CommandError> { + let clusters_url = format!("{}/clusters", DoApiType::Doks.api_url()); + let clusters_response = do_get_from_api(token, DoApiType::Doks, clusters_url); + let clusters: Result = match clusters_response { + Ok(clusters_response) => match serde_json::from_str(clusters_response.as_str()) { + Ok(clusters) => Ok(clusters), + Err(e) => Err(CommandError::new_from_safe_message(e.to_string())), + }, + Err(e) => Err(CommandError::new_from_safe_message(e.message())), + }; + + let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters; + let cluster_name = cluster_name.trim().to_lowercase(); + match clusters_copy + .into_iter() + .filter(|cluster| cluster.name.trim().to_lowercase() == cluster_name) + .collect::>() + .first() + { + Some(cluster) => { + let kubeconfig_url = format!("{}/clusters/{}/kubeconfig", DoApiType::Doks.api_url(), cluster.id); + match do_get_from_api(token, DoApiType::Doks, kubeconfig_url) { + Ok(kubeconfig) => { + if kubeconfig.is_empty() { + return Ok(None); + } + Ok(Some(kubeconfig)) + } + Err(e) => Err(CommandError::new_from_safe_message(e.message())), + } + } + None => Ok(None), + } +} + #[cfg(test)] mod tests_doks { use crate::cloud_provider::digitalocean::kubernetes::doks_api::{ diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 88c6e0f1..4bac2c68 100644 --- 
a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -122,7 +122,7 @@ pub fn do_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -133,21 +133,18 @@ pub fn do_helm_charts( let qovery_terraform_config: DigitalOceanQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; let prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -328,6 +325,10 @@ pub fn do_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), @@ -501,11 +502,7 
@@ datasources: type: loki url: \"http://{}.{}.svc:3100\" ", - prometheus_internal_url, - &loki.chart_info.name, - loki_namespace.to_string(), - &loki.chart_info.name, - loki_namespace.to_string(), + prometheus_internal_url, &loki.chart_info.name, loki_namespace, &loki.chart_info.name, loki_namespace, ); let grafana = CommonChart { @@ -1008,8 +1005,7 @@ datasources: ) ) .as_bytes(), - ) - .to_string(), + ), }, ChartSetValue { key: "do_container_registry_secret_identifier".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 86985aa3..8d58135d 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -1,14 +1,14 @@ use std::borrow::Borrow; use std::env; +use std::fs::File; use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; use crate::cloud_provider::aws::regions::AwsZones; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::kubernetes::doks_api::{ - get_do_latest_doks_slug_from_api, get_doks_info_from_name, + get_do_kubeconfig_by_cluster_name, get_do_latest_doks_slug_from_api, get_doks_info_from_name, }; use crate::cloud_provider::digitalocean::kubernetes::helm_charts::{do_helm_charts, ChartsConfigPrerequisites}; use crate::cloud_provider::digitalocean::kubernetes::node::DoInstancesType; @@ -36,14 +36,18 @@ use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_mana use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; -use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, 
ProgressInfo, ProgressLevel, - ProgressScope, QoveryIdentifier, ToHelmString, +use crate::events::{ + EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, }; +use crate::io_models::{ + Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, + ProgressScope, QoveryIdentifier, StringPath, ToHelmString, +}; +use crate::logger::Logger; +use crate::models::digital_ocean::DoRegion; use crate::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use crate::object_storage::ObjectStorage; +use crate::runtime::block_on; use crate::string::terraform_list_format; use crate::{cmd, dns_provider}; use ::function_name::named; @@ -53,6 +57,7 @@ use retry::OperationResult; use std::path::Path; use std::str::FromStr; use std::sync::Arc; +use tokio::io::AsyncWriteExt; pub mod cidr; pub mod doks_api; @@ -124,18 +129,18 @@ impl DOKS { let err = EngineError::new_unsupported_instance_type( EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), Some(region.to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), - Transmitter::Kubernetes(id.to_string(), name.to_string()), + Transmitter::Kubernetes(id, name), ), node_group.instance_type.as_str(), e, ); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -145,8 +150,8 @@ impl DOKS { context.clone(), "spaces-temp-id".to_string(), "my-spaces-object-storage".to_string(), - cloud_provider.access_key_id().clone(), - 
cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), region, BucketDeleteStrategy::HardDelete, ); @@ -182,6 +187,11 @@ impl DOKS { format!("{}.yaml", self.id) } + // TODO(benjaminch): Very dirty quickfix, should be removed and cluster id / name should be handled globally + fn doks_cluster_name(&self) -> String { + format!("qovery-{}", self.id) + } + // create a context to render tf files (terraform) contained in lib/digitalocean/ fn tera_context(&self) -> Result { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); @@ -207,17 +217,15 @@ impl DOKS { Ok(vpcs) => match vpcs { // new vpc: select a random non used subnet None => { - match get_do_random_available_subnet_from_api(&self.cloud_provider.token(), self.region) { + match get_do_random_available_subnet_from_api(self.cloud_provider.token(), self.region) { Ok(x) => x, - Err(e) => { - return Err(EngineError::new_cannot_get_any_available_vpc(event_details.clone(), e)) - } + Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details, e)), } } // existing vpc: assign current subnet in this case Some(vpc) => vpc.ip_range, }, - Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_any_available_vpc(event_details, e)), } } VpcInitKind::Manual => self.options.vpc_cidr_block.clone(), @@ -238,14 +246,8 @@ impl DOKS { context.insert("do_loadbalancer_hostname", &self.do_loadbalancer_hostname()); context.insert("managed_dns_domain", self.dns_provider.domain().to_string().as_str()); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - &managed_dns_domains_terraform_format, - ); + 
context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); context.insert( "managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format, @@ -268,10 +270,7 @@ impl DOKS { context.insert("test_cluster", &self.context.is_test_cluster()); context.insert("doks_cluster_id", &self.id()); context.insert("doks_master_name", &self.name()); - context.insert( - "doks_version", - self.get_supported_doks_version(event_details.clone())?.as_str(), - ); + context.insert("doks_version", self.get_supported_doks_version(event_details.clone())?.as_str()); context.insert("do_space_kubeconfig_filename", &self.kubeconfig_file_name()); // Network @@ -283,15 +282,9 @@ impl DOKS { context.insert("object_storage_kubeconfig_bucket", &self.kubeconfig_bucket_name()); context.insert("object_storage_logs_bucket", &self.logs_bucket_name()); - context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); context.insert("test_cluster", &self.context.is_test_cluster()); context.insert("qovery_api_url", self.options.qovery_api_url.as_str()); @@ -302,19 +295,13 @@ impl DOKS { context.insert("discord_api_key", self.options.discord_api_key.as_str()); // Qovery features - context.insert( - "log_history_enabled", - &self.context.is_feature_enabled(&Features::LogsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); context.insert( "metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory), ); if 
self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } // grafana credentials @@ -369,16 +356,13 @@ impl DOKS { match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details.clone(), - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { @@ -403,7 +387,7 @@ impl DOKS { match get_do_latest_doks_slug_from_api(self.cloud_provider.token(), self.version()) { Ok(version) => match version { None => Err(EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, self.kind().to_string(), VersionsNumber::from_str(&self.version) .expect("cannot parse version") @@ -412,7 +396,7 @@ impl DOKS { Some(v) => Ok(v), }, Err(e) => Err(EngineError::new_cannot_get_supported_versions_error( - event_details.clone(), + event_details, self.kind().to_string(), e, )), @@ -433,11 +417,7 @@ impl DOKS { } fn do_loadbalancer_hostname(&self) -> String { - format!( - "qovery-nginx-{}.{}", - self.cloud_provider.id(), - self.dns_provider().domain() - ) + format!("qovery-nginx-{}.{}", self.cloud_provider.id(), self.dns_provider().domain()) } fn lets_encrypt_url(&self) -> String { @@ -453,7 +433,7 @@ impl DOKS { let api_url = format!("{}/clusters", DoApiType::Doks.api_url()); let json_content = do_get_from_api(self.cloud_provider.token(), DoApiType::Doks, api_url)?; // TODO(benjaminch): `qovery-` to be added into Rust name directly everywhere - match get_doks_info_from_name(json_content.as_str(), 
format!("qovery-{}", self.id().to_string())) { + match get_doks_info_from_name(json_content.as_str(), format!("qovery-{}", self.id())) { Ok(cluster_result) => match cluster_result { None => Err(CommandError::new_from_safe_message( "Cluster doesn't exist on DO side.".to_string(), @@ -480,13 +460,10 @@ impl DOKS { )), self.context.execution_id(), )); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing DOKS cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing DOKS cluster deployment.".to_string()), + )); // upgrade cluster instead if required match self.get_kubeconfig_file() { @@ -502,28 +479,22 @@ impl DOKS { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), + self.logger().log(EngineEvent::Error(e, None)); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), ), - ); + )); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => 
self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -538,9 +509,9 @@ impl DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -549,31 +520,22 @@ impl DOKS { // this is due to the required dependencies of lib/digitalocean/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying DOKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying DOKS cluster.".to_string()), + )); self.send_to_customer( - format!( - "Deploying DOKS {} cluster deployment with id {}", - self.name(), - self.id() - ) - .as_str(), + format!("Deploying DOKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); @@ -585,16 +547,13 @@ impl DOKS { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => 
self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -604,77 +563,27 @@ impl DOKS { } } } - Err(e) => self.logger().log( - LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - ), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; - // Kubeconfig bucket - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), - ), - ); - if let Err(e) = self.spaces.create_bucket(self.kubeconfig_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), - self.kubeconfig_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - return Err(error); - } - // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), - self.logs_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + let error = + EngineError::new_object_storage_cannot_create_bucket_error(event_details, self.logs_bucket_name(), e); + 
self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } - // push config file to object storage - let kubeconfig_name = format!("{}.yaml", self.id()); - if let Err(e) = self.spaces.put( - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str(), - format!( - "{}/{}/{}", - temp_dir.as_str(), - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str() - ) - .as_str(), - ) { - let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( - event_details.clone(), - self.logs_bucket_name(), - kubeconfig_name.to_string(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); - return Err(error); - } + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); match self.check_workers_on_create() { Ok(_) => { @@ -682,23 +591,17 @@ impl DOKS { format!("Kubernetes {} nodes have been successfully created", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), + )) } Err(e) => { - return Err(EngineError::new_k8s_node_not_ready(event_details.clone(), e)); + return Err(EngineError::new_k8s_node_not_ready(event_details, e)); } }; // kubernetes helm deployments on the 
cluster - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - let credentials_environment_variables: Vec<(String, String)> = self .cloud_provider .credentials_environment_variables() @@ -708,7 +611,7 @@ impl DOKS { let doks_id = match self.get_doks_info_from_name_api() { Ok(cluster) => cluster.id, - Err(e) => return Err(EngineError::new_cannot_get_cluster_error(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_cluster_error(event_details, e)), }; let charts_prerequisites = ChartsConfigPrerequisites { @@ -718,13 +621,13 @@ impl DOKS { cluster_id: self.id.clone(), cluster_long_id: self.long_id, do_cluster_id: doks_id, - region: self.region().to_string(), - cluster_name: self.cluster_name().to_string(), + region: self.region(), + cluster_name: self.cluster_name(), cloud_provider: "digitalocean".to_string(), test_cluster: self.context.is_test_cluster(), do_token: self.cloud_provider.token().to_string(), - do_space_access_id: self.cloud_provider.access_key_id().to_string(), - do_space_secret_key: self.cloud_provider.secret_access_key().to_string(), + do_space_access_id: self.cloud_provider.access_key_id(), + do_space_secret_key: self.cloud_provider.secret_access_key(), do_space_bucket_kubeconfig: self.kubeconfig_bucket_name(), do_space_kubeconfig_filename: self.kubeconfig_file_name(), qovery_engine_location: self.options.qovery_engine_location.clone(), @@ -743,13 +646,10 @@ impl DOKS { let chart_prefix_path = &temp_dir; - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); let helm_charts_to_deploy = do_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), 
&charts_prerequisites, @@ -758,7 +658,7 @@ impl DOKS { .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -793,7 +693,7 @@ impl DOKS { return Err(EngineError::new_k8s_loadbalancer_configuration_issue( event_details.clone(), CommandError::new( - format!("{}, error: {}.", safe_message.to_string(), e.message(),), + format!("{}, error: {}.", safe_message, e.message(),), Some(safe_message.to_string()), ), )); @@ -822,15 +722,12 @@ impl DOKS { ..Default::default() }; - let helm = Helm::new( - &kubeconfig_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; // This will ony print the diff on stdout - let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &vec![]); - helm.upgrade(&load_balancer_dns_hostname, &vec![]) + let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &[]); + helm.upgrade(&load_balancer_dns_hostname, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } @@ -839,39 +736,29 @@ impl DOKS { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("DOKS.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("DOKS.create_error() called.".to_string()), + )); match 
kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new(ok_line, None)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Warning(event_details, EventMessage::new(ok_line, None))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("DOKS.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("DOKS.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -881,13 +768,10 @@ impl DOKS { } fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("DOKS.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("DOKS.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -897,13 +781,10 @@ impl DOKS { } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - 
EventMessage::new_from_safe("DOKS.pause_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("DOKS.pause_error() called.".to_string()), + )); Ok(()) } @@ -911,18 +792,15 @@ impl DOKS { fn delete(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; + let skip_kubernetes_step = false; self.send_to_customer( format!("Preparing to delete DOKS cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete DOKS cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing to delete DOKS cluster.".to_string()), + )); let temp_dir = match self.get_temp_dir(event_details.clone()) { Ok(dir) => dir, @@ -938,9 +816,9 @@ impl DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -950,34 +828,16 @@ impl DOKS { let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - 
common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); - - skip_kubernetes_step = true; - "".to_string() - } - }; - // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process let message = format!( @@ -986,29 +846,24 @@ impl DOKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = 
Path::new(kubeconfig_path); + if !skip_kubernetes_step { // should make the diff between all namespaces and qovery managed namespaces let message = format!( @@ -1016,14 +871,14 @@ impl DOKS { self.name(), self.id() ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), ); @@ -1032,42 +887,33 @@ impl DOKS { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - 
event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -1078,13 +924,10 @@ impl DOKS { "Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1094,166 +937,123 @@ impl DOKS { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubernetes_config_file_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion uninstall_cert_manager( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), event_details.clone(), self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - 
EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Can't delete chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, qovery_namespace, 
self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); - match helm.list_release(None, &vec![]) { + match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log( - LogLevel::Error, - 
EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + let message_safe = format!("Error deleting chart `{}`", chart.name); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1266,34 +1066,28 @@ impl DOKS { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + 
)); Ok(()) } Err(Operation { error, .. }) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("DOKS.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("DOKS.delete_error() called.".to_string()), + )); Ok(()) } @@ -1360,6 +1154,126 @@ impl Kubernetes for DOKS { Ok(()) } + fn get_kubeconfig_file(&self) -> Result<(String, File), EngineError> { + let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); + let bucket_name = format!("qovery-kubeconfigs-{}", self.id()); + let object_key = self.get_kubeconfig_filename(); + let stage = Stage::General(GeneralStep::RetrieveClusterConfig); + + // check if kubeconfig locally exists + let local_kubeconfig = match self.get_temp_dir(event_details.clone()) { + Ok(x) => { + let local_kubeconfig_folder_path = format!("{}/{}", &x, &bucket_name); + let local_kubeconfig_generated = format!("{}/{}", &local_kubeconfig_folder_path, &object_key); + if Path::new(&local_kubeconfig_generated).exists() { + match File::open(&local_kubeconfig_generated) { + Ok(_) => Some(local_kubeconfig_generated), + Err(err) => { + self.logger().log(EngineEvent::Debug( + self.get_event_details(stage), + EventMessage::new( + err.to_string(), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), + ), + )); + None + } + } + } else { + None + } + } + Err(_) => None, + }; + + // 
otherwise, try to get it from digital ocean api + let result = match local_kubeconfig { + Some(local_kubeconfig_generated) => match File::open(&local_kubeconfig_generated) { + Ok(file) => Ok((StringPath::from(&local_kubeconfig_generated), file)), + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + None => { + let kubeconfig = match get_do_kubeconfig_by_cluster_name( + self.cloud_provider.token(), + self.doks_cluster_name().as_str(), + ) { + Ok(kubeconfig) => match kubeconfig { + None => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new_from_safe_message("Kubeconfig is empty".to_string()), + )) + } + Some(content) => content, + }, + Err(e) => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new(e.message(), Some(e.message())), + )) + } + }; + + let workspace_directory = crate::fs::workspace_directory( + self.context().workspace_root_dir(), + self.context().execution_id(), + format!("object-storage/spaces/{}", self.name()), + ) + .map_err(|err| { + EngineError::new_cannot_retrieve_cluster_config_file( + event_details.clone(), + CommandError::new(err.to_string(), Some(err.to_string())), + ) + }) + .expect("Unable to create directory"); + + let file_path = format!("{}/qovery-kubeconfigs-{}/{}.yaml", workspace_directory, self.id(), self.id()); + let path = Path::new(file_path.as_str()); + let parent_dir = path.parent().unwrap(); + let _ = block_on(tokio::fs::create_dir_all(parent_dir)); + + match block_on( + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path), + ) { + Ok(mut created_file) => match block_on(created_file.write_all(kubeconfig.as_bytes())) { + Ok(_) => { + let file = File::open(path).unwrap(); + Ok((file_path, file)) + } + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + 
event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + }, + Err(e) => Err(EngineError::new_cannot_create_file( + event_details.clone(), + CommandError::new(e.to_string(), Some(e.to_string())), + )), + } + } + }; + + match result { + Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( + event_details, + CommandError::new(e.message(), Some(e.message())), + )), + Ok((file_path, file)) => Ok((file_path, file)), + } + } + + fn get_kubeconfig_file_path(&self) -> Result { + let (path, _) = self.get_kubeconfig_file()?; + Ok(path) + } + #[named] fn on_create(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); @@ -1368,7 +1282,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1382,7 +1296,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1400,13 +1314,10 @@ impl Kubernetes for DOKS { .as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing DOKS cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing DOKS cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1420,7 +1331,7 @@ impl Kubernetes for DOKS { self.cloud_provider().credentials_environment_variables(), event_details.stage().clone(), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1428,26 
+1339,19 @@ impl Kubernetes for DOKS { // Upgrade worker nodes // self.send_to_customer( - format!( - "Preparing workers nodes for upgrade for Kubernetes cluster {}", - self.name() - ) - .as_str(), + format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); let upgrade_doks_version = match get_do_latest_doks_slug_from_api(self.cloud_provider.token(), self.version()) { Ok(version) => match version { None => { return Err(EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, self.kind().to_string(), VersionsNumber::from_str(&self.version) .expect("cannot parse version") @@ -1458,14 +1362,14 @@ impl Kubernetes for DOKS { }, Err(e) => { return Err(EngineError::new_cannot_get_supported_versions_error( - event_details.clone(), + event_details, self.kind().to_string(), e, )) } }; - context.insert("doks_version", format!("{}", &upgrade_doks_version).as_str()); + context.insert("doks_version", (&upgrade_doks_version).to_string().as_str()); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( self.template_directory.as_str(), @@ -1473,22 +1377,21 @@ impl Kubernetes for DOKS { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - 
crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1497,13 +1400,10 @@ impl Kubernetes for DOKS { format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => match self.check_workers_on_upgrade(kubernetes_upgrade_status.requested_version.to_string()) { @@ -1512,29 +1412,21 @@ impl Kubernetes for DOKS { format!("Kubernetes {} nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes nodes have been successfully upgraded.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes nodes have been successfully upgraded.".to_string()), + )); } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( - event_details.clone(), + event_details, kubernetes_upgrade_status.requested_version.to_string(), e, )); } }, Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return 
Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } @@ -1549,7 +1441,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1563,7 +1455,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ -1577,7 +1469,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1591,7 +1483,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1605,7 +1497,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -1619,7 +1511,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -1633,7 +1525,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -1647,7 +1539,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -1703,7 +1595,7 @@ impl Kubernetes for DOKS { self.struct_name(), 
function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -1731,7 +1623,7 @@ impl Kubernetes for DOKS { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) diff --git a/src/cloud_provider/digitalocean/kubernetes/node.rs b/src/cloud_provider/digitalocean/kubernetes/node.rs index 3a5bb7a5..549fa3eb 100644 --- a/src/cloud_provider/digitalocean/kubernetes/node.rs +++ b/src/cloud_provider/digitalocean/kubernetes/node.rs @@ -113,7 +113,7 @@ impl FromStr for DoInstancesType { "s-32vcpu-192gb" => Ok(DoInstancesType::S32vcpu192gb), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index 36d2689d..debccdf3 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -9,9 +9,8 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::DIGITAL_OCEAN_TOKEN; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod do_api_common; pub mod kubernetes; @@ -105,11 +104,7 @@ impl CloudProvider for DO { let client = DigitalOcean::new(&self.token); match client { Ok(_x) => Ok(()), - Err(_) => { - return Err(EngineError::new_client_invalid_cloud_provider_credentials( - event_details, - )); - } + Err(_) => Err(EngineError::new_client_invalid_cloud_provider_credentials(event_details)), } } diff --git a/src/cloud_provider/digitalocean/models/do_api.rs b/src/cloud_provider/digitalocean/models/do_api.rs 
new file mode 100644 index 00000000..e69de29b diff --git a/src/cloud_provider/digitalocean/models/doks.rs b/src/cloud_provider/digitalocean/models/doks.rs index 0fa04c8e..e7a9394d 100644 --- a/src/cloud_provider/digitalocean/models/doks.rs +++ b/src/cloud_provider/digitalocean/models/doks.rs @@ -1,11 +1,11 @@ use serde::{Deserialize, Serialize}; -#[derive(Default, Serialize, Deserialize, PartialEq, Debug)] +#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct DoksList { pub kubernetes_clusters: Vec, } -#[derive(Default, Serialize, Deserialize, PartialEq, Debug)] +#[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct KubernetesCluster { pub id: String, pub name: String, diff --git a/src/cloud_provider/digitalocean/network/load_balancer.rs b/src/cloud_provider/digitalocean/network/load_balancer.rs index ee9e2cd1..ace9ca02 100644 --- a/src/cloud_provider/digitalocean/network/load_balancer.rs +++ b/src/cloud_provider/digitalocean/network/load_balancer.rs @@ -143,7 +143,7 @@ mod tests_do_api_output { } } "#; - let ip_returned_from_api = get_ip_from_do_load_balancer_api_output(&json_content); + let ip_returned_from_api = get_ip_from_do_load_balancer_api_output(json_content); assert_eq!(ip_returned_from_api.unwrap().to_string(), "104.131.186.241"); } diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 86074690..76225984 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::models::vpc::{Vpc, Vpcs}; use crate::errors::CommandError; +use crate::models::digital_ocean::DoRegion; #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] #[serde(rename_all = 
"snake_case")] @@ -15,8 +15,8 @@ pub enum VpcInitKind { impl ToString for VpcInitKind { fn to_string(&self) -> String { match self { - &VpcInitKind::Autodetect => "autodetect".to_string(), - &VpcInitKind::Manual => "manual".to_string(), + VpcInitKind::Autodetect => "autodetect".to_string(), + VpcInitKind::Manual => "manual".to_string(), } } } @@ -129,7 +129,7 @@ fn do_get_vpcs_from_api_output(json_content: &str) -> Result, CommandEr Err(e) => { let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API"; Err(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e), + format!("{}, error: {}", message_safe, e), Some(message_safe.to_string()), )) } @@ -169,11 +169,11 @@ fn is_do_reserved_vpc_subnets(region: DoRegion, subnet: &str) -> bool { #[cfg(test)] mod tests_do_vpcs { - use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::network::vpc::{ do_get_vpcs_from_api_output, get_do_vpc_from_name, get_do_vpc_from_subnet, get_random_available_subnet, is_do_reserved_vpc_subnets, VpcInitKind, }; + use crate::models::digital_ocean::DoRegion; fn do_get_vpc_json() -> String { // https://developers.digitalocean.com/documentation/v2/#retrieve-an-existing-load-balancer @@ -286,11 +286,9 @@ mod tests_do_vpcs { // DO reserved subnet in the same region assert!(get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs.clone(), DoRegion::Frankfurt).is_err()); // DO reserved subnet in another region - assert!( - get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs, DoRegion::London) - .unwrap() - .is_none() - ); + assert!(get_do_vpc_from_subnet("10.19.0.0/16".to_string(), vpcs, DoRegion::London) + .unwrap() + .is_none()); } #[test] @@ -307,7 +305,7 @@ mod tests_do_vpcs { let json_content = do_get_vpc_json(); let existing_vpcs = do_get_vpcs_from_api_output(&json_content).unwrap(); - assert!(get_random_available_subnet(existing_vpcs.clone(), DoRegion::Frankfurt).is_ok()); + 
assert!(get_random_available_subnet(existing_vpcs, DoRegion::Frankfurt).is_ok()); } #[test] diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index 7eb80eb6..1bc93804 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -4,7 +4,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -12,11 +12,11 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use crate::logger::Logger; use ::function_name::named; -pub struct Router { +pub struct RouterDo { context: Context, id: String, action: Action, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterDo { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterDo { context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterDo { fn context(&self) -> &Context { &self.context } @@ -134,8 +134,8 @@ impl Service for Router { context.insert("doks_cluster_id", kubernetes.id()); let applications = 
environment - .stateless_services - .iter() + .stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -162,24 +162,19 @@ impl Service for Router { let route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) { - Some(application) => match application.private_port() { - Some(private_port) => Some(RouteDataTemplate { - path: r.path.clone(), - application_name: application.sanitized_name().to_string(), - application_port: private_port, - }), - _ => None, - }, + Some(application) => application.private_port().map(|private_port| RouteDataTemplate { + path: r.path.clone(), + application_name: application.sanitized_name(), + application_port: private_port, + }), _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); // autoscaler @@ -207,27 +202,21 @@ impl Service for Router { Some(hostname) => context.insert("external_ingress_hostname_default", hostname.as_str()), None => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe( - "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), - ), + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe( + "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), ), - ); + )); } }, _ => { // FIXME really? 
// TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), + )); } } @@ -263,7 +252,7 @@ impl Service for Router { } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterDo { fn domains(&self) -> Vec<&str> { let mut _domains = vec![self.default_domain.as_str()]; @@ -279,7 +268,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterDo { fn helm_selector(&self) -> Option { self.selector() } @@ -293,10 +282,7 @@ impl Helm for Router { } fn helm_chart_values_dir(&self) -> String { - format!( - "{}/digitalocean/chart_values/nginx-ingress", - self.context.lib_root_dir() - ) + format!("{}/digitalocean/chart_values/nginx-ingress", self.context.lib_root_dir()) } fn helm_chart_external_name_service_dir(&self) -> String { @@ -304,7 +290,7 @@ impl Helm for Router { } } -impl Listen for Router { +impl Listen for RouterDo { fn listeners(&self) -> &Listeners { &self.listeners } @@ -314,15 +300,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterDo { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} -impl ToTransmitter for Router { +impl ToTransmitter for RouterDo { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterDo { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -351,9 +341,9 @@ impl Create for Router { 
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -377,7 +367,7 @@ impl Create for Router { self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e)) } @@ -400,19 +390,16 @@ impl Create for Router { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. 
Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } @@ -438,7 +425,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterDo { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -472,7 +459,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterDo { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -484,7 +471,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ -502,6 +489,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 11a1f96a..916e8759 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,4 +1,5 @@ -use crate::cloud_provider::service::{Action, StatefulService, StatelessService}; +use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; +use crate::models::application::IApplication; use crate::unit_conversion::cpu_string_to_float; pub struct Environment { @@ -7,8 +8,10 @@ pub struct Environment { pub project_id: String, pub owner_id: String, pub organization_id: String, - pub stateless_services: Vec>, - pub stateful_services: Vec>, + pub action: Action, + pub applications: Vec>, + pub routers: Vec>, + pub databases: Vec>, } impl Environment { @@ -17,8 +20,10 @@ impl Environment { project_id: &str, owner_id: &str, organization_id: &str, 
- stateless_services: Vec>, - stateful_services: Vec>, + action: Action, + applications: Vec>, + routers: Vec>, + databases: Vec>, ) -> Self { Environment { namespace: format!("{}-{}", project_id, id), @@ -26,11 +31,41 @@ impl Environment { project_id: project_id.to_string(), owner_id: owner_id.to_string(), organization_id: organization_id.to_string(), - stateless_services, - stateful_services, + action, + applications, + routers, + databases, } } + pub fn stateless_services(&self) -> Vec<&dyn StatelessService> { + let mut stateless_services: Vec<&dyn StatelessService> = + Vec::with_capacity(self.applications.len() + self.routers.len()); + stateless_services.extend_from_slice( + self.applications + .iter() + .map(|x| x.as_stateless_service()) + .collect::>() + .as_slice(), + ); + stateless_services.extend_from_slice( + self.routers + .iter() + .map(|x| x.as_stateless_service()) + .collect::>() + .as_slice(), + ); + + stateless_services + } + + pub fn stateful_services(&self) -> Vec<&dyn StatefulService> { + self.databases + .iter() + .map(|x| x.as_stateful_service()) + .collect::>() + } + pub fn namespace(&self) -> &str { self.namespace.as_str() } @@ -41,10 +76,10 @@ impl Environment { pub fn required_resources(&self) -> EnvironmentResources { let mut total_cpu_for_stateless_services: f32 = 0.0; let mut total_ram_in_mib_for_stateless_services: u32 = 0; - let mut required_pods = self.stateless_services.len() as u32; + let mut required_pods = self.stateless_services().len() as u32; - for service in &self.stateless_services { - match *service.action() { + for service in self.stateless_services() { + match service.action() { Action::Create | Action::Nothing => { total_cpu_for_stateless_services += cpu_string_to_float(&service.total_cpus()); total_ram_in_mib_for_stateless_services += &service.total_ram_in_mib(); @@ -56,7 +91,7 @@ impl Environment { let mut total_cpu_for_stateful_services: f32 = 0.0; let mut total_ram_in_mib_for_stateful_services: u32 = 0; - for service 
in &self.stateful_services { + for service in self.stateful_services() { if service.is_managed_service() { // If it is a managed service, we don't care of its resources as it is not managed by us continue; diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index f78fe1c1..fb3be15b 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -11,6 +11,7 @@ use crate::errors::CommandError; use crate::utilities::calculate_hash; use semver::Version; use std::collections::HashMap; +use std::fmt::{Display, Formatter}; use std::path::Path; use std::{fs, thread}; use thread::spawn; @@ -35,9 +36,9 @@ pub enum HelmChartNamespaces { Custom, } -impl HelmChartNamespaces { - pub fn to_string(&self) -> String { - match self { +impl Display for HelmChartNamespaces { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let str = match self { HelmChartNamespaces::Custom => "custom", HelmChartNamespaces::KubeSystem => "kube-system", HelmChartNamespaces::Prometheus => "prometheus", @@ -45,8 +46,9 @@ impl HelmChartNamespaces { HelmChartNamespaces::CertManager => "cert-manager", HelmChartNamespaces::NginxIngress => "nginx-ingress", HelmChartNamespaces::Qovery => "qovery", - } - .to_string() + }; + + f.write_str(str) } } @@ -153,10 +155,8 @@ pub trait HelmChart: Send { let chart = self.get_chart_info(); for file in chart.values_files.iter() { if let Err(e) = fs::metadata(file) { - let safe_message = format!( - "Can't access helm chart override file `{}` for chart `{}`", - file, chart.name, - ); + let safe_message = + format!("Can't access helm chart override file `{}` for chart `{}`", file, chart.name,); return Err(CommandError::new( format!("{}, error: {:?}", safe_message, e), Some(safe_message), @@ -201,16 +201,16 @@ pub trait HelmChart: Send { fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result, CommandError> { info!("prepare and deploy chart {}", &self.get_chart_info().name); let payload = 
self.check_prerequisites()?; - let payload = self.pre_exec(&kubernetes_config, &envs, payload)?; - let payload = match self.exec(&kubernetes_config, &envs, payload.clone()) { + let payload = self.pre_exec(kubernetes_config, envs, payload)?; + let payload = match self.exec(kubernetes_config, envs, payload.clone()) { Ok(payload) => payload, Err(e) => { error!("Error while deploying chart: {}", e.message()); - self.on_deploy_failure(&kubernetes_config, &envs, payload)?; + self.on_deploy_failure(kubernetes_config, envs, payload)?; return Err(e); } }; - let payload = self.post_exec(&kubernetes_config, &envs, payload)?; + let payload = self.post_exec(kubernetes_config, envs, payload)?; Ok(payload) } @@ -226,18 +226,18 @@ pub trait HelmChart: Send { match chart_info.action { HelmAction::Deploy => { - if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) { + if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { warn!( "error while trying to destroy chart if breaking change is detected: {:?}", e.to_string() ); } - helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?; + helm.upgrade(chart_info, &[]).map_err(to_command_error)?; } HelmAction::Destroy => { let chart_info = self.get_chart_info(); - helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?; + helm.uninstall(chart_info, &[]).map_err(to_command_error)?; } HelmAction::Skip => {} } @@ -306,7 +306,7 @@ fn deploy_parallel_charts( Err(e) => { let safe_message = "Thread panicked during parallel charts deployments."; let error = Err(CommandError::new( - format!("{}, error: {:?}", safe_message.to_string(), e), + format!("{}, error: {:?}", safe_message, e), Some(safe_message.to_string()), )); errors.push(error); @@ -324,7 +324,7 @@ fn deploy_parallel_charts( pub fn deploy_charts_levels( kubernetes_config: &Path, - envs: &Vec<(String, String)>, + envs: &[(String, String)], charts: Vec>>, dry_run: bool, ) -> Result<(), CommandError> { @@ -338,7 +338,7 @@ pub fn 
deploy_charts_levels( let chart_info = chart.get_chart_info(); // don't do diff on destroy or skip if chart_info.action == HelmAction::Deploy { - let _ = helm.upgrade_diff(chart_info, &vec![]); + let _ = helm.upgrade_diff(chart_info, &[]); } } @@ -347,7 +347,7 @@ pub fn deploy_charts_levels( continue; } - if let Err(e) = deploy_parallel_charts(&kubernetes_config, &envs, level) { + if let Err(e) = deploy_parallel_charts(kubernetes_config, envs, level) { return Err(e); } } @@ -442,13 +442,13 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "annotate", "--overwrite", - &kind, + kind, &self.chart_info.name, format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(), ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; kubectl_exec_with_output( vec![ @@ -456,13 +456,13 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "annotate", "--overwrite", - &kind, + kind, &self.chart_info.name, "meta.helm.sh/release-namespace=kube-system", ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; kubectl_exec_with_output( vec![ @@ -470,13 +470,13 @@ impl HelmChart for CoreDNSConfigChart { "kube-system", "label", "--overwrite", - &kind, + kind, &self.chart_info.name, "app.kubernetes.io/managed-by=Helm", ], environment_variables.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; Ok(()) }; @@ -490,7 +490,7 @@ impl HelmChart for CoreDNSConfigChart { fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result, CommandError> { info!("prepare and deploy chart {}", &self.get_chart_info().name); self.check_prerequisites()?; - let payload = match self.pre_exec(&kubernetes_config, &envs, None) { + let payload = match self.pre_exec(kubernetes_config, envs, None) { Ok(p) => match p { None => { return Err(CommandError::new_from_safe_message( @@ -501,12 +501,12 @@ impl HelmChart for CoreDNSConfigChart { }, Err(e) => return Err(e), }; - if let Err(e) = 
self.exec(&kubernetes_config, &envs, None) { + if let Err(e) = self.exec(kubernetes_config, envs, None) { error!("Error while deploying chart: {:?}", e.message()); - self.on_deploy_failure(&kubernetes_config, &envs, None)?; + self.on_deploy_failure(kubernetes_config, envs, None)?; return Err(e); }; - self.post_exec(&kubernetes_config, &envs, Some(payload))?; + self.post_exec(kubernetes_config, envs, Some(payload))?; Ok(None) } @@ -594,19 +594,19 @@ impl HelmChart for PrometheusOperatorConfigChart { match chart_info.action { HelmAction::Deploy => { - if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) { + if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &[]) { warn!( "error while trying to destroy chart if breaking change is detected: {}", e.to_string() ); } - helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?; + helm.upgrade(chart_info, &[]).map_err(to_command_error)?; } HelmAction::Destroy => { let chart_info = self.get_chart_info(); - if helm.check_release_exist(&chart_info, &vec![]).is_ok() { - helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?; + if helm.check_release_exist(chart_info, &[]).is_ok() { + helm.uninstall(chart_info, &[]).map_err(to_command_error)?; let prometheus_crds = [ "prometheuses.monitoring.coreos.com", diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 7cdadc7f..e1e90163 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -31,11 +31,11 @@ use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter}; use crate::fs::workspace_directory; -use crate::logger::{LogLevel, Logger}; -use crate::models::ProgressLevel::Info; -use crate::models::{ +use crate::io_models::ProgressLevel::Info; +use crate::io_models::{ Action, Context, Listen, ListenersHelper, ProgressInfo, 
ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, }; +use crate::logger::Logger; use crate::object_storage::ObjectStorage; use crate::unit_conversion::{any_to_mi, cpu_string_to_float}; @@ -72,7 +72,7 @@ pub trait Kubernetes: Listen { QoveryIdentifier::from(context.organization_id().to_string()), QoveryIdentifier::from(context.cluster_id().to_string()), QoveryIdentifier::from(context.execution_id().to_string()), - Some(self.region().to_string()), + Some(self.region()), stage, Transmitter::Kubernetes(self.id().to_string(), self.name().to_string()), ) @@ -97,19 +97,13 @@ pub trait Kubernetes: Listen { match File::open(&local_kubeconfig_generated) { Ok(_) => Some(local_kubeconfig_generated), Err(err) => { - self.logger().log( - LogLevel::Debug, - EngineEvent::Debug( - self.get_event_details(stage.clone()), - EventMessage::new( - err.to_string(), - Some( - format!("Error, couldn't open {} file", &local_kubeconfig_generated,) - .to_string(), - ), - ), + self.logger().log(EngineEvent::Debug( + self.get_event_details(stage.clone()), + EventMessage::new( + err.to_string(), + Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), ), - ); + )); None } } @@ -136,17 +130,10 @@ pub trait Kubernetes: Listen { Ok((path, file)) => (path, file), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), - CommandError::new_from_safe_message( - format!( - "Error getting file from store, error: {}", - err.message.unwrap_or_else(|| "no details.".to_string()) - ) - .to_string(), - ), + self.get_event_details(stage), + err.into(), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } } @@ -157,13 +144,10 @@ pub trait Kubernetes: Listen { Ok(metadata) => metadata, Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), - 
CommandError::new_from_safe_message( - format!("Error getting file metadata, error: {}", err.to_string(),).to_string(), - ), + self.get_event_details(stage), + CommandError::new_from_safe_message(format!("Error getting file metadata, error: {}", err,)), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; @@ -172,14 +156,10 @@ pub trait Kubernetes: Listen { permissions.set_mode(0o400); if let Err(err) = std::fs::set_permissions(string_path.as_str(), permissions) { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), - CommandError::new_from_safe_message(format!( - "Error setting file permissions, error: {}", - err.to_string(), - )), + self.get_event_details(stage), + CommandError::new_from_safe_message(format!("Error setting file permissions, error: {}", err,)), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -203,13 +183,13 @@ pub trait Kubernetes: Listen { Err(err) => { let error = EngineError::new_cannot_get_cluster_nodes( self.get_event_details(stage), - CommandError::new_from_safe_message( - format!("Error while trying to get cluster nodes, error: {}", err.message()).to_string(), - ), + CommandError::new_from_safe_message(format!( + "Error while trying to get cluster nodes, error: {}", + err.message() + )), ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -257,9 +237,9 @@ pub trait Kubernetes: Listen { Err(e) => Err(e), Ok(..) 
=> match is_kubernetes_upgrade_required( kubeconfig, - &self.version(), + self.version(), self.cloud_provider().credentials_environment_variables(), - event_details.clone(), + event_details, self.logger(), ) { Ok(x) => self.upgrade_with_status(x), @@ -360,7 +340,7 @@ pub trait Kubernetes: Listen { envs.clone(), ) { return Err(EngineError::new_k8s_cannot_delete_pod( - event_details.clone(), + event_details, pod.metadata.name.to_string(), e, )); @@ -368,10 +348,7 @@ pub trait Kubernetes: Listen { } } Err(e) => { - return Err(EngineError::new_k8s_cannot_get_crash_looping_pods( - event_details.clone(), - e, - )); + return Err(EngineError::new_k8s_cannot_get_crash_looping_pods(event_details, e)); } }, }; @@ -440,7 +417,7 @@ pub fn deploy_environment( }; // create all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.exec_action(&stateful_deployment_target), kubernetes, @@ -477,7 +454,7 @@ pub fn deploy_environment( }; // create all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.exec_action(&stateless_deployment_target), kubernetes, @@ -495,13 +472,13 @@ pub fn deploy_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.exec_check_action(), kubernetes, service, event_details.clone(), - logger.clone(), + logger, &stateless_deployment_target, &listeners_helper, "check deployment", @@ -512,7 +489,7 @@ pub fn deploy_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.exec_check_action(), kubernetes, @@ -553,7 +530,7 @@ pub fn deploy_environment_error( }; // clean up all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_create_error(&stateful_deployment_target), kubernetes, @@ -577,7 +554,7 @@ pub fn deploy_environment_error( }; // clean up all stateless services (router, application...) - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_create_error(&stateless_deployment_target), kubernetes, @@ -615,7 +592,7 @@ pub fn pause_environment( }; // create all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_pause(&stateless_deployment_target), kubernetes, @@ -633,7 +610,7 @@ pub fn pause_environment( thread::sleep(std::time::Duration::from_millis(100)); // create all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_pause(&stateful_deployment_target), kubernetes, @@ -650,7 +627,7 @@ pub fn pause_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_pause_check(), kubernetes, @@ -668,7 +645,7 @@ pub fn pause_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_pause_check(), kubernetes, @@ -706,7 +683,7 @@ pub fn delete_environment( }; // delete all stateless services (router, application...) 
- for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_delete(&stateful_deployment_target), kubernetes, @@ -724,7 +701,7 @@ pub fn delete_environment( thread::sleep(std::time::Duration::from_millis(100)); // delete all stateful services (database) - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_delete(&stateful_deployment_target), kubernetes, @@ -741,7 +718,7 @@ pub fn delete_environment( // Quick fix: adding 100 ms delay to avoid race condition on service status update thread::sleep(std::time::Duration::from_millis(100)); - for service in &environment.stateless_services { + for service in environment.stateless_services() { let _ = service::check_kubernetes_service_error( service.on_delete_check(), kubernetes, @@ -759,7 +736,7 @@ pub fn delete_environment( thread::sleep(std::time::Duration::from_millis(100)); // check all deployed services - for service in &environment.stateful_services { + for service in environment.stateful_services() { let _ = service::check_kubernetes_service_error( service.on_delete_check(), kubernetes, @@ -776,7 +753,7 @@ pub fn delete_environment( // do not catch potential error - to confirm let _ = kubectl::kubectl_exec_delete_namespace( kubernetes.get_kubeconfig_file_path()?, - &environment.namespace(), + environment.namespace(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -806,20 +783,17 @@ where for object in cert_manager_objects { // check resource exist first if let Err(e) = kubectl_exec_count_all_objects(&kubernetes_config, object, envs.clone()) { - logger.log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new( - format!( - "Encountering issues while trying to get objects kind {}: {:?}", - object, - e.message() - ), - None, + 
logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Encountering issues while trying to get objects kind {}: {:?}", + object, + e.message() ), + None, ), - ); + )); continue; } @@ -829,13 +803,10 @@ where || match kubectl_delete_objects_in_all_namespaces(&kubernetes_config, object, envs.clone()) { Ok(_) => OperationResult::Ok(()), Err(e) => { - logger.log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(format!("Failed to delete all {} objects, retrying...", object,), None), - ), - ); + logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Failed to delete all {} objects, retrying...", object,), None), + )); OperationResult::Retry(e) } }, @@ -843,7 +814,7 @@ where Ok(_) => {} Err(Operation { error, .. }) => { return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details.clone(), + event_details, "Cert-Manager".to_string(), object.to_string(), error, @@ -851,7 +822,7 @@ where } Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details.clone(), + event_details, "Cert-Manager".to_string(), object.to_string(), CommandError::new_from_safe_message(msg), @@ -876,19 +847,14 @@ where // check master versions let v = match kubectl_exec_version(&kubernetes_config, envs.clone()) { Ok(v) => v, - Err(e) => { - return Err(EngineError::new_cannot_execute_k8s_exec_version( - event_details.clone(), - e, - )) - } + Err(e) => return Err(EngineError::new_cannot_execute_k8s_exec_version(event_details, e)), }; let raw_version = format!("{}.{}", v.server_version.major, v.server_version.minor); let masters_version = match VersionsNumber::from_str(raw_version.as_str()) { Ok(vn) => vn, Err(_) => { return Err(EngineError::new_cannot_determine_k8s_master_version( - event_details.clone(), + event_details, raw_version.to_string(), )) } @@ -898,7 +864,7 @@ where let mut workers_version: Vec = vec![]; let nodes = match 
kubectl_exec_get_node(kubernetes_config, envs) { Ok(n) => n, - Err(e) => return Err(EngineError::new_cannot_get_cluster_nodes(event_details.clone(), e)), + Err(e) => return Err(EngineError::new_cannot_get_cluster_nodes(event_details, e)), }; for node in nodes.items { @@ -907,7 +873,7 @@ where Ok(vn) => workers_version.push(vn), Err(_) => { return Err(EngineError::new_cannot_determine_k8s_kubelet_worker_version( - event_details.clone(), + event_details, node.status.node_info.kubelet_version.to_string(), )) } @@ -918,20 +884,14 @@ where Ok(vn) => workers_version.push(vn), Err(_) => { return Err(EngineError::new_cannot_determine_k8s_kube_proxy_version( - event_details.clone(), + event_details, node.status.node_info.kube_proxy_version.to_string(), )) } } } - check_kubernetes_upgrade_status( - requested_version, - masters_version, - workers_version, - event_details.clone(), - logger, - ) + check_kubernetes_upgrade_status(requested_version, masters_version, workers_version, event_details, logger) } pub fn is_kubernetes_upgradable

( @@ -949,7 +909,7 @@ where for pdb in pdbs.items.unwrap() { if pdb.status.current_healthy < pdb.status.desired_healthy { return Err(EngineError::new_k8s_pod_disruption_budget_invalid_state( - event_details.clone(), + event_details, pdb.metadata.name, )); } @@ -957,12 +917,7 @@ where Ok(()) } }, - Err(err) => { - return Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget( - event_details.clone(), - err, - )); - } + Err(err) => Err(EngineError::new_k8s_cannot_retrieve_pods_disruption_budget(event_details, err)), } } @@ -985,7 +940,7 @@ where )); } } - return OperationResult::Ok(()); + OperationResult::Ok(()) } } }); @@ -1020,16 +975,16 @@ where )); } } - return OperationResult::Ok(()); + OperationResult::Ok(()) } } }); - return match result { + match result { Ok(_) => Ok(()), Err(Operation { error, .. }) => Err(error), Err(retry::Error::Internal(e)) => Err(CommandError::new_from_safe_message(e)), - }; + } } #[derive(Debug, PartialEq)] @@ -1073,7 +1028,7 @@ fn check_kubernetes_upgrade_status( Ok(v) => v, Err(e) => { return Err(EngineError::new_cannot_determine_k8s_requested_upgrade_version( - event_details.clone(), + event_details, requested_version.to_string(), Some(e), )); @@ -1084,10 +1039,7 @@ fn check_kubernetes_upgrade_status( match compare_kubernetes_cluster_versions_for_upgrade(&deployed_masters_version, &wished_version) { Ok(x) => { if let Some(msg) = x.message { - logger.log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(msg)), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg))); }; if x.older_version_detected { older_masters_version_detected = x.older_version_detected; @@ -1099,7 +1051,7 @@ fn check_kubernetes_upgrade_status( Err(e) => { return Err( EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details.clone(), + event_details, deployed_masters_version, wished_version, e, @@ -1110,15 +1062,12 @@ fn 
check_kubernetes_upgrade_status( // check workers versions if deployed_workers_version.is_empty() { - logger.log( - LogLevel::Warning, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "No worker nodes found, can't check if upgrade is required for workers".to_string(), - ), + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No worker nodes found, can't check if upgrade is required for workers".to_string(), ), - ); + )); return Ok(KubernetesUpgradeStatus { required_upgrade_on, @@ -1152,7 +1101,7 @@ fn check_kubernetes_upgrade_status( Err(e) => { return Err( EngineError::new_k8s_version_upgrade_deployed_vs_requested_versions_inconsistency( - event_details.clone(), + event_details, node, wished_version, e, @@ -1162,22 +1111,19 @@ fn check_kubernetes_upgrade_status( } } - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(match &required_upgrade_on { - None => "All workers are up to date, no upgrade required".to_string(), - Some(node_type) => match node_type { - KubernetesNodesType::Masters => "Kubernetes master upgrade required".to_string(), - KubernetesNodesType::Workers => format!( - "Kubernetes workers upgrade required, need to update {}/{} nodes", - non_up_to_date_workers, total_workers - ), - }, - }), - ), - ); + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe(match &required_upgrade_on { + None => "All workers are up to date, no upgrade required".to_string(), + Some(node_type) => match node_type { + KubernetesNodesType::Masters => "Kubernetes master upgrade required".to_string(), + KubernetesNodesType::Workers => format!( + "Kubernetes workers upgrade required, need to update {}/{} nodes", + non_up_to_date_workers, total_workers + ), + }, + }), + )); Ok(KubernetesUpgradeStatus { required_upgrade_on, @@ -1235,12 +1181,12 @@ pub fn compare_kubernetes_cluster_versions_for_upgrade( messages.push("Older Kubernetes 
major version detected"); } - if &wished_minor_version > &deployed_minor_version { + if wished_minor_version > deployed_minor_version { upgrade_required.upgraded_required = true; messages.push("Kubernetes minor version change detected"); } - if &wished_minor_version < &deployed_minor_version { + if wished_minor_version < deployed_minor_version { upgrade_required.upgraded_required = false; upgrade_required.older_version_detected = true; messages.push("Older Kubernetes minor version detected"); @@ -1330,9 +1276,7 @@ where { let listeners = std::clone::Clone::clone(kubernetes.listeners()); let logger = kubernetes.logger().clone_dyn(); - let event_details = kubernetes - .get_event_details(Stage::Infrastructure(InfrastructureStep::Create)) - .clone(); + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); let progress_info = ProgressInfo::new( ProgressScope::Infrastructure { @@ -1353,7 +1297,7 @@ where let listeners_helper = ListenersHelper::new(&listeners); let action = action; let progress_info = progress_info; - let waiting_message = waiting_message.unwrap_or("no message ...".to_string()); + let waiting_message = waiting_message.unwrap_or_else(|| "no message ...".to_string()); loop { // do notify users here @@ -1364,42 +1308,33 @@ where match action { Action::Create => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Create), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Create), ), - ); + event_message, + )); } Action::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing( - EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Pause), - ), - 
event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Pause), ), - ); + event_message, + )); } Action::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - EventDetails::clone_changing_stage( - event_details, - Stage::Infrastructure(InfrastructureStep::Delete), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Infrastructure(InfrastructureStep::Delete), ), - ); + event_message, + )); } Action::Nothing => {} // should not happens }; @@ -1448,10 +1383,7 @@ pub fn validate_k8s_required_cpu_and_burstable( context_id, )); - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(message)), - ); + logger.log(EngineEvent::Warning(event_details, EventMessage::new_from_safe(message))); set_cpu_burst = total_cpu.clone(); } @@ -1502,8 +1434,8 @@ mod tests { use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd::structs::{KubernetesList, KubernetesNode, KubernetesVersion}; use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter}; + use crate::io_models::{ListenersHelper, QoveryIdentifier}; use crate::logger::StdIoLogger; - use crate::models::{ListenersHelper, QoveryIdentifier}; #[test] pub fn check_kubernetes_upgrade_method() { @@ -1579,7 +1511,7 @@ mod tests { "1.17", version_1_17.clone(), vec![version_1_17.clone(), version_1_16.clone()], - event_details.clone(), + event_details, &logger, ) .unwrap(); @@ -1596,7 +1528,7 @@ mod tests { "Provider version: {} | Wished version: {} | Is upgrade required: {:?}", provider_version.clone(), provider.clone(), - compare_kubernetes_cluster_versions_for_upgrade(&provider_version, &provider) + compare_kubernetes_cluster_versions_for_upgrade(provider_version, provider) .unwrap() .message ) @@ -2076,8 +2008,8 @@ mod tests { let milli_cpu = 
"250m".to_string(); let int_cpu = "2".to_string(); - assert_eq!(convert_k8s_cpu_value_to_f32(milli_cpu).unwrap(), 0.25 as f32); - assert_eq!(convert_k8s_cpu_value_to_f32(int_cpu).unwrap(), 2 as f32); + assert_eq!(convert_k8s_cpu_value_to_f32(milli_cpu).unwrap(), 0.25_f32); + assert_eq!(convert_k8s_cpu_value_to_f32(int_cpu).unwrap(), 2_f32); } #[test] @@ -2092,9 +2024,9 @@ mod tests { let event_details = EventDetails::new( Some(Aws), - QoveryIdentifier::new(organization_id.to_string()), - QoveryIdentifier::new(cluster_id.to_string()), - QoveryIdentifier::new(execution_id.to_string()), + QoveryIdentifier::new_from_long_id(organization_id.to_string()), + QoveryIdentifier::new_from_long_id(cluster_id.to_string()), + QoveryIdentifier::new_from_long_id(execution_id.to_string()), Some("region_fake".to_string()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(cluster_id.to_string(), format!("{}-name", cluster_id)), @@ -2128,7 +2060,7 @@ mod tests { context_id, total_cpu, cpu_burst, - event_details.clone(), + event_details, &logger ) .unwrap(), diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index eb9a412a..650b1d09 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -7,7 +7,7 @@ use crate::cloud_provider::environment::Environment; use crate::cloud_provider::kubernetes::Kubernetes; use crate::errors::EngineError; use crate::events::{EventDetails, Stage, ToTransmitter}; -use crate::models::{Context, Listen}; +use crate::io_models::{Context, Listen}; pub mod aws; pub mod digitalocean; diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index 1c9c24f9..d9a27611 100644 --- a/src/cloud_provider/qovery.rs +++ b/src/cloud_provider/qovery.rs @@ -63,13 +63,13 @@ pub fn get_qovery_app_version( Ok(x) => match x.json::() { Ok(qa) => Ok(qa), Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: 
{:?}", message_safe, e), + Some(message_safe), )), }, Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )), } } diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs deleted file mode 100644 index 7405d0bf..00000000 --- a/src/cloud_provider/scaleway/application.rs +++ /dev/null @@ -1,633 +0,0 @@ -use std::fmt; -use std::str::FromStr; - -use tera::Context as TeraContext; - -use crate::build_platform::Image; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm, - Pause, Service, ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; - -pub struct Application { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: 
Box, -} - -impl Application { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - image: Image, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - Application { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - image, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl crate::cloud_provider::service::Application for Application { - fn image(&self) -> &Image { - &self.image - } - - fn set_image(&mut self, image: Image) { - self.image = image; - } -} - -impl Helm for Application { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/scaleway/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for Application {} - -impl ToTransmitter for Application { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for Application { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - 
self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.image().commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - - match &self.image().registry_url { - Some(registry_url) => context.insert( - "image_name_with_tag", - format!("{}/{}", registry_url.as_str(), self.image().name_with_tag()).as_str(), - ), - None => { - let image_name_with_tag = self.image().name_with_tag(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - "there is no registry url, use image name with tag with the default container registry: {}", - image_name_with_tag.as_str() - )), - ), - ); - context.insert("image_name_with_tag", image_name_with_tag.as_str()); - } - } - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| 
EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - - match self.image.registry_name.as_ref() { - Some(_) => { - context.insert("is_registry_secret", &true); - context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); - } - None => { - context.insert("is_registry_secret", &false); - } - }; - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - &self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details.clone(), - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - // TODO(benjaminch): Switch to proper storage class - // Note: Seems volume storage type are not supported, only blocked storage for the time being - // https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass - StorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", - StorageType::LocalSsd => "l_ssd", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", 
- &self.context.resource_expiration_in_seconds(), - ) - } - - // container registry credentials - context.insert( - "container_registry_docker_json_config", - self.image - .clone() - .registry_docker_json_config - .unwrap_or("".to_string()) - .as_str(), - ); - - Ok(context) - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } -} - -impl Create for Application { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for Application { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application( - target, - self, - 0, - if self.is_stateful() { 
Statefulset } else { Deployment }, - ) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for Application { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, false, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, true, event_details.clone()) - }) - } -} - -impl Listen for Application { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] -pub enum StorageType { - #[serde(rename = "b_ssd")] - BlockSsd, - #[serde(rename = "l_ssd")] - LocalSsd, -} - -#[derive(Clone, 
Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwRegion { - Paris, - Amsterdam, - Warsaw, -} - -impl ScwRegion { - // TODO(benjaminch): improve / refactor this! - pub fn as_str(&self) -> &str { - match self { - ScwRegion::Paris => "fr-par", - ScwRegion::Amsterdam => "nl-ams", - ScwRegion::Warsaw => "pl-waw", - } - } -} - -impl fmt::Display for ScwRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwRegion::Paris => write!(f, "fr-par"), - ScwRegion::Amsterdam => write!(f, "nl-ams"), - ScwRegion::Warsaw => write!(f, "pl-waw"), - } - } -} - -impl FromStr for ScwRegion { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "fr-par" => Ok(ScwRegion::Paris), - "nl-ams" => Ok(ScwRegion::Amsterdam), - "pl-waw" => Ok(ScwRegion::Warsaw), - _ => Err(()), - } - } -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwZone { - Paris1, - Paris2, - Paris3, - Amsterdam1, - Warsaw1, -} - -impl ScwZone { - // TODO(benjaminch): improve / refactor this! - pub fn as_str(&self) -> &str { - match self { - ScwZone::Paris1 => "fr-par-1", - ScwZone::Paris2 => "fr-par-2", - ScwZone::Paris3 => "fr-par-3", - ScwZone::Amsterdam1 => "nl-ams-1", - ScwZone::Warsaw1 => "pl-waw-1", - } - } - - pub fn region(&self) -> ScwRegion { - match self { - ScwZone::Paris1 => ScwRegion::Paris, - ScwZone::Paris2 => ScwRegion::Paris, - ScwZone::Paris3 => ScwRegion::Paris, - ScwZone::Amsterdam1 => ScwRegion::Amsterdam, - ScwZone::Warsaw1 => ScwRegion::Warsaw, - } - } - - // TODO(benjaminch): improve / refactor this! 
- pub fn region_str(&self) -> String { - match self { - ScwZone::Paris1 => "fr-par", - ScwZone::Paris2 => "fr-par", - ScwZone::Paris3 => "fr-par", - ScwZone::Amsterdam1 => "nl-ams", - ScwZone::Warsaw1 => "pl-waw", - } - .to_string() - } -} - -impl fmt::Display for ScwZone { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwZone::Paris1 => write!(f, "fr-par-1"), - ScwZone::Paris2 => write!(f, "fr-par-2"), - ScwZone::Paris3 => write!(f, "fr-par-3"), - ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), - ScwZone::Warsaw1 => write!(f, "pl-waw-1"), - } - } -} - -impl FromStr for ScwZone { - type Err = CommandError; - - fn from_str(s: &str) -> Result { - match s { - "fr-par-1" => Ok(ScwZone::Paris1), - "fr-par-2" => Ok(ScwZone::Paris2), - "fr-par-3" => Ok(ScwZone::Paris3), - "nl-ams-1" => Ok(ScwZone::Amsterdam1), - "pl-waw-1" => Ok(ScwZone::Warsaw1), - _ => { - return Err(CommandError::new_from_safe_message(format!( - "`{}` zone is not supported", - s - ))); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{ScwRegion, ScwZone}; - use std::str::FromStr; - - #[test] - fn test_region_to_str() { - assert_eq!("fr-par", ScwRegion::Paris.as_str()); - assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); - assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); - } - - #[test] - fn test_region_from_str() { - assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); - assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); - assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); - } - - #[test] - fn test_zone_to_str() { - assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); - assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); - assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); - assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); - assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); - } - - #[test] - fn test_zone_from_str() { - assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); - 
assert_eq!(ScwZone::from_str("fr-par-2"), Ok(ScwZone::Paris2)); - assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); - assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); - assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); - } - - #[test] - fn test_zone_region() { - assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); - assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); - } -} diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index dbef1c9b..f1b39561 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; -pub struct MongoDB { +pub struct MongoDbScw { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct MongoDB { logger: Box, } -impl MongoDB { +impl MongoDbScw { pub fn new( context: Context, id: &str, @@ -48,7 +48,7 @@ impl MongoDB { listeners: Listeners, logger: Box, ) -> Self { - MongoDB { + MongoDbScw { context, action, id: id.to_string(), @@ -83,23 +83,23 @@ impl MongoDB { } } -impl StatefulService for MongoDB { +impl StatefulService for MongoDbScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MongoDB { +impl 
ToTransmitter for MongoDbScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for MongoDB { +impl Service for MongoDbScw { fn context(&self) -> &Context { &self.context } @@ -173,7 +173,7 @@ impl Service for MongoDB { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -181,7 +181,7 @@ impl Service for MongoDB { context.insert("namespace", environment.namespace()); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? .matched_version() .to_string(); context.insert("version", &version); @@ -194,10 +194,7 @@ impl Service for MongoDB { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_db_name", self.name.as_str()); context.insert("database_login", self.options.login.as_str()); @@ -215,10 +212,7 @@ impl Service for MongoDB { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) @@ -233,9 +227,9 @@ impl Service for MongoDB { } } -impl 
Database for MongoDB {} +impl Database for MongoDbScw {} -impl Helm for MongoDB { +impl Helm for MongoDbScw { fn helm_selector(&self) -> Option { self.selector() } @@ -257,7 +251,7 @@ impl Helm for MongoDB { } } -impl Terraform for MongoDB { +impl Terraform for MongoDbScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -267,7 +261,7 @@ impl Terraform for MongoDB { } } -impl Create for MongoDB { +impl Create for MongoDbScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -287,12 +281,7 @@ impl Create for MongoDB { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -310,7 +299,7 @@ impl Create for MongoDB { } } -impl Pause for MongoDB { +impl Pause for MongoDbScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -348,7 +337,7 @@ impl Pause for MongoDB { } } -impl Delete for MongoDB { +impl Delete for MongoDbScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -385,7 +374,7 @@ impl Delete for MongoDB { } } -impl Listen for MongoDB { +impl Listen for MongoDbScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index 4f6472c3..6d33eb8f 100644 --- 
a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -13,13 +13,13 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; use std::collections::HashMap; -pub struct MySQL { +pub struct MySQLScw { context: Context, id: String, action: Action, @@ -35,7 +35,7 @@ pub struct MySQL { logger: Box, } -impl MySQL { +impl MySQLScw { pub fn new( context: Context, id: &str, @@ -110,23 +110,23 @@ impl MySQL { } } -impl StatefulService for MySQL { +impl StatefulService for MySQLScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for MySQL { +impl ToTransmitter for MySQLScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for MySQL { +impl Service for MySQLScw { fn context(&self) -> &Context { &self.context } @@ -199,7 +199,7 @@ impl Service for MySQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -207,7 +207,7 @@ impl Service for MySQL { context.insert("namespace", environment.namespace()); let version = &self - 
.matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? .matched_version(); context.insert("version_major", &version.to_major_version_string()); context.insert("version", &version.to_string()); // Scaleway needs to have major version only @@ -220,10 +220,7 @@ impl Service for MySQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -246,27 +243,24 @@ impl Service for MySQL { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("skip_final_snapshot", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for MySQL {} +impl Database for MySQLScw {} -impl Helm for MySQL { +impl Helm for MySQLScw { fn helm_selector(&self) -> Option { self.selector() } @@ -288,7 +282,7 @@ impl Helm for MySQL { } } -impl Terraform for MySQL { +impl Terraform for MySQLScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -298,7 +292,7 @@ 
impl Terraform for MySQL { } } -impl Create for MySQL { +impl Create for MySQLScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -318,12 +312,7 @@ impl Create for MySQL { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -342,7 +331,7 @@ impl Create for MySQL { } } -impl Pause for MySQL { +impl Pause for MySQLScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -380,7 +369,7 @@ impl Pause for MySQL { } } -impl Delete for MySQL { +impl Delete for MySQLScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -417,7 +406,7 @@ impl Delete for MySQL { } } -impl Listen for MySQL { +impl Listen for MySQLScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index c5b30b5e..d101ecbc 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -13,13 +13,13 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use 
crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; use std::collections::HashMap; -pub struct PostgreSQL { +pub struct PostgresScw { context: Context, id: String, action: Action, @@ -35,7 +35,7 @@ pub struct PostgreSQL { logger: Box, } -impl PostgreSQL { +impl PostgresScw { pub fn new( context: Context, id: &str, @@ -119,23 +119,23 @@ impl PostgreSQL { } } -impl StatefulService for PostgreSQL { +impl StatefulService for PostgresScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for PostgreSQL { +impl ToTransmitter for PostgresScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for PostgreSQL { +impl Service for PostgresScw { fn context(&self) -> &Context { &self.context } @@ -208,7 +208,7 @@ impl Service for PostgreSQL { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); @@ -216,7 +216,7 @@ impl Service for PostgreSQL { context.insert("namespace", environment.namespace()); let version = &self - .matching_correct_version(self.is_managed_service(), event_details.clone())? + .matching_correct_version(self.is_managed_service(), event_details)? 
.matched_version(); context.insert("version_major", &version.to_major_version_string()); context.insert("version", &version.to_string()); // Scaleway needs to have major version only @@ -229,10 +229,7 @@ impl Service for PostgreSQL { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_name", self.sanitized_name().as_str()); context.insert("database_db_name", self.name()); @@ -255,27 +252,24 @@ impl Service for PostgreSQL { context.insert("delete_automated_backups", &self.context().is_test_cluster()); context.insert("skip_final_snapshot", &self.context().is_test_cluster()); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for PostgreSQL {} +impl Database for PostgresScw {} -impl Helm for PostgreSQL { +impl Helm for PostgresScw { fn helm_selector(&self) -> Option { self.selector() } @@ -297,7 +291,7 @@ impl Helm for PostgreSQL { } } -impl Terraform for PostgreSQL { +impl Terraform for PostgresScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -307,7 +301,7 @@ impl Terraform for PostgreSQL { } } -impl Create for PostgreSQL { +impl Create for PostgresScw { #[named] fn on_create(&self, 
target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -327,12 +321,7 @@ impl Create for PostgreSQL { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -351,7 +340,7 @@ impl Create for PostgreSQL { } } -impl Pause for PostgreSQL { +impl Pause for PostgresScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -389,7 +378,7 @@ impl Pause for PostgreSQL { } } -impl Delete for PostgreSQL { +impl Delete for PostgresScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -426,7 +415,7 @@ impl Delete for PostgreSQL { } } -impl Listen for PostgreSQL { +impl Listen for PostgresScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 4643cc7b..528152da 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -11,12 +11,12 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use 
::function_name::named; -pub struct Redis { +pub struct RedisScw { context: Context, id: String, action: Action, @@ -32,7 +32,7 @@ pub struct Redis { logger: Box, } -impl Redis { +impl RedisScw { pub fn new( context: Context, id: &str, @@ -83,23 +83,23 @@ impl Redis { } } -impl StatefulService for Redis { +impl StatefulService for RedisScw { + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + fn is_managed_service(&self) -> bool { self.options.mode == MANAGED } } -impl ToTransmitter for Redis { +impl ToTransmitter for RedisScw { fn to_transmitter(&self) -> Transmitter { - Transmitter::Database( - self.id().to_string(), - self.service_type().to_string(), - self.name().to_string(), - ) + Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) } } -impl Service for Redis { +impl Service for RedisScw { fn context(&self) -> &Context { &self.context } @@ -172,13 +172,13 @@ impl Service for Redis { context.insert("kubeconfig_path", &kube_config_file_path); kubectl::kubectl_exec_create_namespace_without_labels( - &environment.namespace(), + environment.namespace(), kube_config_file_path.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); let version = self - .matching_correct_version(event_details.clone())? + .matching_correct_version(event_details)? 
.matched_version() .to_string(); @@ -193,10 +193,7 @@ impl Service for Redis { context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert( - "fqdn", - self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(), - ); + context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); context.insert("database_login", self.options.login.as_str()); context.insert("database_password", self.options.password.as_str()); @@ -213,27 +210,24 @@ impl Service for Redis { context.insert("publicly_accessible", &self.options.publicly_accessible); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } Ok(context) } - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("app={}", self.sanitized_name())) + } } -impl Database for Redis {} +impl Database for RedisScw {} -impl Helm for Redis { +impl Helm for RedisScw { fn helm_selector(&self) -> Option { self.selector() } @@ -255,7 +249,7 @@ impl Helm for Redis { } } -impl Terraform for Redis { +impl Terraform for RedisScw { fn terraform_common_resource_dir_path(&self) -> String { format!("{}/scaleway/services/common", self.context.lib_root_dir()) } @@ -265,7 +259,7 @@ impl Terraform for Redis { } } -impl Create for Redis { +impl Create for RedisScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -285,12 +279,7 @@ impl Create for Redis { fn on_create_check(&self) -> Result<(), EngineError> { let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains( - self.listeners.clone(), - vec![self.fqdn.as_str()], - event_details, - self.logger(), - ) + self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) } #[named] @@ -308,7 +297,7 @@ impl Create for Redis { } } -impl Pause for Redis { +impl Pause for RedisScw { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -345,7 +334,7 @@ impl Pause for Redis { } } -impl Delete for Redis { +impl Delete for RedisScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -382,7 +371,7 @@ impl Delete for Redis { } } -impl Listen for Redis { +impl Listen for RedisScw { fn listeners(&self) -> &Listeners { &self.listeners } diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 98bb8f3e..0e5e6469 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -3,9 +3,9 @@ use crate::cloud_provider::helm::{ CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext, }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; -use crate::cloud_provider::scaleway::application::{ScwRegion, ScwZone}; use crate::cloud_provider::scaleway::kubernetes::KapsuleOptions; use crate::errors::CommandError; +use crate::models::scaleway::{ScwRegion, ScwZone}; use semver::Version; use serde::{Deserialize, Serialize}; use std::fs::File; @@ -117,7 +117,7 @@ pub fn scw_helm_charts( Err(e) => { let message_safe = "Can't deploy helm chart as Qovery terraform config file has not 
been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), + format!("{}, error: {:?}", message_safe, e), Some(message_safe.to_string()), )); } @@ -128,21 +128,18 @@ pub fn scw_helm_charts( let qovery_terraform_config: ScalewayQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!( - "Error while parsing terraform config file {}", - qovery_terraform_config_file - ); + let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe.to_string(), e), - Some(message_safe.to_string()), + format!("{}, error: {:?}", message_safe, e), + Some(message_safe), )); } }; let prometheus_namespace = HelmChartNamespaces::Prometheus; - let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace.to_string()); + let prometheus_internal_url = format!("http://prometheus-operated.{}.svc", prometheus_namespace); let loki_namespace = HelmChartNamespaces::Logging; - let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace.to_string()); + let loki_kube_dns_prefix = format!("loki.{}.svc", loki_namespace); // Qovery storage class let q_storage_class = CommonChart { @@ -302,6 +299,10 @@ pub fn scw_helm_charts( timeout_in_seconds: 480, values_files: vec![chart_path("chart_values/kube-prometheus-stack.yaml")], values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, ChartSetValue { key: "nameOverride".to_string(), value: "prometheus-operator".to_string(), @@ -450,11 +451,7 @@ datasources: type: loki url: \"http://{}.{}.svc:3100\" ", - prometheus_internal_url.clone(), - &loki.chart_info.name, - loki_namespace.to_string(), - &loki.chart_info.name, - loki_namespace.to_string(), + prometheus_internal_url, &loki.chart_info.name, 
loki_namespace, &loki.chart_info.name, loki_namespace, ); let grafana = CommonChart { diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 44c8b8f5..4b3e8d78 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -10,7 +10,6 @@ use crate::cloud_provider::kubernetes::{ }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::scaleway::application::ScwZone; use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites}; use crate::cloud_provider::scaleway::kubernetes::node::{ScwInstancesType, ScwNodeGroup}; use crate::cloud_provider::utilities::print_action; @@ -23,10 +22,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, }; +use crate::logger::Logger; +use crate::models::scaleway::ScwZone; use crate::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; @@ -162,10 +162,10 @@ impl Kapsule { let err = EngineError::new_unsupported_instance_type( EventDetails::new( Some(cloud_provider.kind()), - QoveryIdentifier::new(context.organization_id().to_string()), - QoveryIdentifier::new(context.cluster_id().to_string()), - QoveryIdentifier::new(context.execution_id().to_string()), - Some(zone.region_str().to_string()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + 
QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), + Some(zone.region_str()), Stage::Infrastructure(InfrastructureStep::LoadConfiguration), Transmitter::Kubernetes(id, name), ), @@ -173,7 +173,7 @@ impl Kapsule { e, ); - logger.log(LogLevel::Error, EngineEvent::Error(err.clone(), None)); + logger.log(EngineEvent::Error(err.clone(), None)); return Err(err); } @@ -183,8 +183,8 @@ impl Kapsule { context.clone(), "s3-temp-id".to_string(), "default-s3".to_string(), - cloud_provider.access_key_id().clone(), - cloud_provider.secret_access_key().clone(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), zone, BucketDeleteStrategy::Empty, false, @@ -240,7 +240,7 @@ impl Kapsule { Err(e) => { let msg = format!("wasn't able to retrieve SCW cluster information from the API. {:?}", e); return Err(EngineError::new_cannot_get_cluster_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } @@ -248,9 +248,9 @@ impl Kapsule { // if no cluster exists let cluster_info_content = cluster_info.clusters.unwrap(); - if &cluster_info_content.len() == &(0 as usize) { + if cluster_info_content.is_empty() { return Ok(None); - } else if &cluster_info_content.len() != &(1 as usize) { + } else if cluster_info_content.len() != 1_usize { let msg = format!( "too many clusters found with this name, where 1 was expected. 
{:?}", &cluster_info_content.len() @@ -268,7 +268,7 @@ impl Kapsule { &self, cluster_info: ScalewayK8sV1Cluster, ) -> Result, ScwNodeGroupErrors> { - let error_cluster_id = format!("expected cluster id for this Scaleway cluster"); + let error_cluster_id = "expected cluster id for this Scaleway cluster".to_string(); let cluster_id = match cluster_info.id { None => { return Err(ScwNodeGroupErrors::NodeGroupValidationError( @@ -291,7 +291,7 @@ impl Kapsule { Ok(x) => x, Err(e) => { let msg = format!("error while trying to get SCW pool info from cluster {}", &cluster_id); - let msg_with_error = format!("{}. {:?}", msg.clone(), e); + let msg_with_error = format!("{}. {:?}", msg, e); return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( msg_with_error, Some(msg), @@ -304,22 +304,16 @@ impl Kapsule { let msg = format!( "No SCW pool found from the SCW API for cluster {}/{}", &cluster_id, - &cluster_info.name.unwrap_or("unknown cluster".to_string()) + &cluster_info.name.unwrap_or_else(|| "unknown cluster".to_string()) ); - return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( - msg.clone(), - Some(msg), - ))); + return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new(msg.clone(), Some(msg)))); } // create sanitized nodegroup pools let mut nodegroup_pool: Vec = Vec::with_capacity(pools.total_count.unwrap_or(0 as f32) as usize); for ng in pools.pools.unwrap() { if ng.id.is_none() { - let msg = format!( - "error while trying to validate SCW pool ID from cluster {}", - &cluster_id - ); + let msg = format!("error while trying to validate SCW pool ID from cluster {}", &cluster_id); return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new( msg.clone(), Some(msg), @@ -342,10 +336,8 @@ impl Kapsule { Err(e) => { return Err(match e { Error::ResponseError(x) => { - let msg_with_error = format!( - "Error code while getting node group: {}, API message: {} ", - x.status, x.content - ); + let msg_with_error = + format!("Error code 
while getting node group: {}, API message: {} ", x.status, x.content); match x.status { StatusCode::NOT_FOUND => ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( msg_with_error, @@ -396,20 +388,17 @@ impl Kapsule { fn check_missing_nodegroup_info(&self, item: &Option, name: &str) -> Result<(), ScwNodeGroupErrors> { let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration)); - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_workers_group_info_error( - event_details, - CommandError::new_from_safe_message(format!( - "Missing node pool info {} for cluster {}", - name, - self.context.cluster_id() - )), - ), - None, + self.logger.log(EngineEvent::Error( + EngineError::new_missing_workers_group_info_error( + event_details, + CommandError::new_from_safe_message(format!( + "Missing node pool info {} for cluster {}", + name, + self.context.cluster_id() + )), ), - ); + None, + )); if item.is_none() { return Err(ScwNodeGroupErrors::MissingNodePoolInfo); @@ -451,14 +440,8 @@ impl Kapsule { context.insert("managed_dns", &managed_dns_list); context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert( - "managed_dns_domains_root_helm_format", - &managed_dns_domains_root_helm_format, - ); - context.insert( - "managed_dns_domains_terraform_format", - &managed_dns_domains_terraform_format, - ); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); context.insert( "managed_dns_domains_root_terraform_format", &managed_dns_domains_root_terraform_format, @@ -492,29 +475,17 @@ impl Kapsule { context.insert("qovery_nats_url", self.options.qovery_nats_url.as_str()); context.insert("qovery_nats_user", self.options.qovery_nats_user.as_str()); context.insert("qovery_nats_password", self.options.qovery_nats_password.as_str()); - 
context.insert( - "engine_version_controller_token", - &self.options.engine_version_controller_token, - ); - context.insert( - "agent_version_controller_token", - &self.options.agent_version_controller_token, - ); + context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); + context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); // Qovery features - context.insert( - "log_history_enabled", - &self.context.is_feature_enabled(&Features::LogsHistory), - ); + context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); context.insert( "metrics_history_enabled", &self.context.is_feature_enabled(&Features::MetricsHistory), ); if self.context.resource_expiration_in_seconds().is_some() { - context.insert( - "resource_expiration_in_seconds", - &self.context.resource_expiration_in_seconds(), - ) + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } // AWS S3 tfstates storage tfstates @@ -554,16 +525,13 @@ impl Kapsule { match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details.clone(), - "VAULT_SECRET_ID".to_string(), - ), - None, + None => self.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable( + event_details, + "VAULT_SECRET_ID".to_string(), ), - ), + None, + )), } } None => { @@ -613,13 +581,10 @@ impl Kapsule { format!("Preparing SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing SCW cluster deployment.".to_string()), - ), - ); + self.logger.log(EngineEvent::Info( + event_details.clone(), + 
EventMessage::new_from_safe("Preparing SCW cluster deployment.".to_string()), + )); // upgrade cluster instead if required match self.get_kubeconfig_file() { @@ -635,28 +600,18 @@ impl Kapsule { return self.upgrade_with_status(x); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) } Err(e) => { - self.logger().log(LogLevel::Error, EngineEvent::Error(e, None)); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), + )))); } }, - Err(_) => self.logger().log(LogLevel::Info, EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) }; @@ -671,9 +626,9 @@ impl Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -682,24 +637,20 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); 
let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Deploying SCW cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deploying SCW cluster.".to_string()), + )); self.send_to_customer( format!("Deploying SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), @@ -714,16 +665,13 @@ impl Kapsule { for entry in x.clone() { if entry.starts_with(item) { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details.clone(), + event_details, entry.to_string(), e, )) @@ -733,79 +681,60 @@ impl Kapsule { } } } - Err(e) => self.logger().log( - LogLevel::Warning, - EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - ), - ), + Err(e) => self.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), }; // 
TODO(benjaminch): move this elsewhere // Create object-storage buckets - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Create Qovery managed object storage buckets".to_string()), + )); if let Err(e) = self .object_storage .create_bucket(self.kubeconfig_bucket_name().as_str()) { let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), + event_details, self.kubeconfig_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } // Logs bucket if let Err(e) = self.object_storage.create_bucket(self.logs_bucket_name().as_str()) { - let error = EngineError::new_object_storage_cannot_create_bucket_error( - event_details.clone(), - self.logs_bucket_name(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), - ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + let error = + EngineError::new_object_storage_cannot_create_bucket_error(event_details, self.logs_bucket_name(), e); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } // push config file to object storage + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = 
Path::new(kubeconfig_path); let kubeconfig_name = format!("{}.yaml", self.id()); if let Err(e) = self.object_storage.put( self.kubeconfig_bucket_name().as_str(), kubeconfig_name.as_str(), - format!( - "{}/{}/{}", - temp_dir.as_str(), - self.kubeconfig_bucket_name().as_str(), - kubeconfig_name.as_str() - ) - .as_str(), + kubeconfig_path.to_str().expect("No path for Kubeconfig"), ) { let error = EngineError::new_object_storage_cannot_put_file_into_bucket_error( - event_details.clone(), + event_details, self.logs_bucket_name(), kubeconfig_name.to_string(), - CommandError::new(e.message.unwrap_or("No error message".to_string()), None), + e, ); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } @@ -813,7 +742,7 @@ impl Kapsule { if cluster_info.is_none() { let msg = "no cluster found from the Scaleway API".to_string(); return Err(EngineError::new_no_cluster_found_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } @@ -826,45 +755,39 @@ impl Kapsule { match e { ScwNodeGroupErrors::CloudProviderApiError(c) => { return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(c), )) } - ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "cluster do not exists, no node groups can be retrieved for upgrade check".to_string(), - ), + ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe( + "cluster do not exists, no node groups can be retrieved for upgrade check".to_string(), ), - ), + )), ScwNodeGroupErrors::MultipleClusterFound => { let msg = "multiple clusters found, can't match the correct node groups".to_string(); return 
Err(EngineError::new_multiple_cluster_found_expected_one_error( - event_details.clone(), + event_details, CommandError::new(msg.clone(), Some(msg)), )); } - ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "cluster exists, but no node groups found for upgrade check".to_string(), - ), + ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe( + "cluster exists, but no node groups found for upgrade check".to_string(), ), - ), + )), ScwNodeGroupErrors::MissingNodePoolInfo => { - let msg = format!("Error with Scaleway API while trying to retrieve node pool info"); + let msg = "Error with Scaleway API while trying to retrieve node pool info".to_string(); return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(CommandError::new_from_safe_message(msg)), )); } ScwNodeGroupErrors::NodeGroupValidationError(c) => { return Err(EngineError::new_missing_api_info_from_cloud_provider_error( - event_details.clone(), + event_details, Some(c), )); } @@ -874,33 +797,27 @@ impl Kapsule { }; // ensure all node groups are in ready state Scaleway side - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "ensuring all groups nodes are in ready state from the Scaleway API".to_string(), - ), + self.logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "ensuring all groups nodes are in ready state from the Scaleway API".to_string(), ), - ); + )); for ng in current_nodegroups { let res = retry::retry( // retry 10 min max per nodegroup until they are ready Fixed::from_millis(15000).take(40), || { - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "checking node 
group {}/{:?}, current status: {:?}", - &ng.name, - &ng.id.as_ref().unwrap_or(&"unknown".to_string()), - &ng.status - )), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "checking node group {}/{:?}, current status: {:?}", + &ng.name, + &ng.id.as_ref().unwrap_or(&"unknown".to_string()), + &ng.status + )), + )); let pool_id = match &ng.id { None => { let msg = @@ -923,15 +840,13 @@ impl Kapsule { event_details.clone(), Some(c), ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } ScwNodeGroupErrors::ClusterDoesNotExists(c) => { let current_error = EngineError::new_no_cluster_found_error(event_details.clone(), c); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } ScwNodeGroupErrors::MultipleClusterFound => { @@ -954,8 +869,7 @@ impl Kapsule { event_details.clone(), Some(c), ); - self.logger - .log(LogLevel::Error, EngineEvent::Error(current_error.clone(), None)); + self.logger.log(EngineEvent::Error(current_error.clone(), None)); OperationResult::Retry(current_error) } } @@ -978,21 +892,18 @@ impl Kapsule { Err(Operation { error, .. 
}) => return Err(error), Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_k8s_node_not_ready( - event_details.clone(), + event_details, CommandError::new(msg, Some("Waiting for too long worker nodes to be ready".to_string())), )) } } } - self.logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "all node groups for this cluster are ready from cloud provider API".to_string(), - ), + self.logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "all node groups for this cluster are ready from cloud provider API".to_string(), ), - ); + )); // ensure all nodes are ready on Kubernetes match self.check_workers_on_create() { @@ -1001,23 +912,17 @@ impl Kapsule { format!("Kubernetes {} nodes have been successfully created", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), - ), - ) + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes nodes have been successfully created".to_string()), + )) } Err(e) => { - return Err(EngineError::new_k8s_node_not_ready(event_details.clone(), e)); + return Err(EngineError::new_k8s_node_not_ready(event_details, e)); } }; // kubernetes helm deployments on the cluster - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - let credentials_environment_variables: Vec<(String, String)> = self .cloud_provider .credentials_environment_variables() @@ -1034,8 +939,8 @@ impl Kapsule { self.cluster_name(), "scw".to_string(), self.context.is_test_cluster(), - self.cloud_provider.access_key_id().to_string(), - self.cloud_provider.secret_access_key().to_string(), + self.cloud_provider.access_key_id(), + self.cloud_provider.secret_access_key(), 
self.options.scaleway_project_id.to_string(), self.options.qovery_engine_location.clone(), self.context.is_feature_enabled(&Features::LogsHistory), @@ -1052,24 +957,21 @@ impl Kapsule { self.options.clone(), ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); let helm_charts_to_deploy = scw_helm_charts( format!("{}/qovery-tf-config.json", &temp_dir).as_str(), &charts_prerequisites, Some(&temp_dir), - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, ) .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; deploy_charts_levels( - &kubeconfig_path, + kubeconfig_path, &credentials_environment_variables, helm_charts_to_deploy, self.context.is_dry_run_deploy(), @@ -1082,39 +984,29 @@ impl Kapsule { let (kubeconfig_path, _) = self.get_kubeconfig_file()?; let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("SCW.create_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe("SCW.create_error() called.".to_string()), + )); match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self.logger().log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(ok_line)), - ), - Err(err) => self.logger().log( - LogLevel::Error, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new("Error 
trying to get kubernetes events".to_string(), Some(err.message())), - ), - ), + Ok(ok_line) => self + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(ok_line))), + Err(err) => self.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + )), }; Ok(()) } fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("SCW.upgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe("SCW.upgrade_error() called.".to_string()), + )); Ok(()) } @@ -1124,13 +1016,10 @@ impl Kapsule { } fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deploying( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("SCW.downgrade_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe("SCW.downgrade_error() called.".to_string()), + )); Ok(()) } @@ -1144,13 +1033,10 @@ impl Kapsule { &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing SCW cluster pause.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("Preparing SCW cluster pause.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1167,9 +1053,9 @@ impl Kapsule { context, ) { 
return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1178,13 +1064,12 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } @@ -1202,18 +1087,14 @@ impl Kapsule { tf_workers_resources_name } Err(e) => { - let error = EngineError::new_terraform_state_does_not_exist(event_details.clone(), e); - self.logger() - .log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); + self.logger().log(EngineEvent::Error(error.clone(), None)); return Err(error); } }; if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes( - event_details.clone(), - None, - )); + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); } let kubernetes_config_file_path = self.get_kubeconfig_file_path()?; @@ -1239,7 +1120,7 @@ impl Kapsule { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { let safe_message = "Error while looking at the API metric value"; - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), 
CommandError::new(format!("{}, error: {}", safe_message, e.to_string()), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); } _ => {} } @@ -1254,24 +1135,24 @@ impl Kapsule { Err(e) => { let safe_message = format!("Error while looking at the API metric value {}", metric_name); OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.message()), Some(safe_message.to_string())))) + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e.message()), Some(safe_message)))) } }; }); match wait_engine_job_finish { Ok(_) => { - self.logger().log(LogLevel::Info, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. }) => { return Err(error) } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) } } } - false => self.logger().log(LogLevel::Warning, EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe("The Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), } } @@ -1285,39 +1166,28 @@ impl Kapsule { format!("Pausing SCW {} cluster deployment with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing( - event_details.clone(), - EventMessage::new_from_safe("Pausing SCW cluster deployment.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing SCW cluster deployment.".to_string()), + )); match terraform_exec(temp_dir.as_str(), terraform_args) { Ok(_) => { let message = format!("Kubernetes cluster {} successfully paused", self.name()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); Ok(()) } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )), + Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), } } fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Pausing( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("SCW.pause_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe("SCW.pause_error() called.".to_string()), + )); Ok(()) } @@ -1325,19 
+1195,16 @@ impl Kapsule { fn delete(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; + let skip_kubernetes_step = false; self.send_to_customer( format!("Preparing to delete SCW cluster {} with id {}", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete SCW cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing to delete SCW cluster.".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1350,9 +1217,9 @@ impl Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1361,33 +1228,16 @@ impl Kapsule { // this is due to the required dependencies of lib/scaleway/bootstrap/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = - crate::template::copy_non_template_files(bootstrap_charts_dir.to_string(), common_charts_temp_dir.as_str()) + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - bootstrap_charts_dir.to_string(), - common_charts_temp_dir.to_string(), + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, e, )); } - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let safe_message = 
"Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), - ), - ); - skip_kubernetes_step = true; - "".to_string() - } - }; - // should apply before destroy to be sure destroy will compute on all resources // don't exit on failure, it can happen if we resume a destroy process let message = format!( @@ -1396,29 +1246,24 @@ impl Kapsule { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log( - LogLevel::Error, - EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - ), - ); + self.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); }; + let kubeconfig_path = &self.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(kubeconfig_path); + if !skip_kubernetes_step { // should make the diff between all namespaces and qovery managed namespaces let message = format!( @@ -1426,14 +1271,14 @@ impl Kapsule { self.name(), self.id() ); - 
self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); self.send_to_customer(&message, &listeners_helper); let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), ); @@ -1442,42 +1287,33 @@ impl Kapsule { let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); for namespace_to_delete in namespaces_to_delete.iter() { match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, namespace_to_delete, self.cloud_provider().credentials_environment_variables(), ) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - ), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete the namespace `{}`", - namespace_to_delete - )), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + 
EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); } } } @@ -1488,13 +1324,10 @@ impl Kapsule { "Error while getting all namespaces for Kubernetes cluster {}", self.name_with_id(), ); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), - ), - ); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message())), + )); } } @@ -1504,166 +1337,123 @@ impl Kapsule { self.id() ); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubernetes_config_file_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; + let helm = Helm::new(&kubeconfig_path, &self.cloud_provider.credentials_environment_variables()) + .map_err(|e| to_engine_error(&event_details, e))?; let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| to_engine_error(&event_details, e))?; // required to avoid namespace stuck on deletion uninstall_cert_manager( - &kubernetes_config_file_path, + &kubeconfig_path, self.cloud_provider().credentials_environment_variables(), event_details.clone(), self.logger(), )?; - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm 
charts".to_string()), + )); let qovery_namespaces = get_qovery_managed_namespaces(); for qovery_namespace in qovery_namespaces.iter() { let charts_to_delete = helm - .list_release(Some(qovery_namespace), &vec![]) + .list_release(Some(qovery_namespace), &[]) .map_err(|e| to_engine_error(&event_details, e))?; for chart in charts_to_delete { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { let message_safe = format!("Can't delete chart `{}`", chart.name); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); for qovery_namespace in qovery_namespaces.iter() { let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, + &kubeconfig_path, qovery_namespace, self.cloud_provider().credentials_environment_variables(), ); match deletion { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - 
), - ), + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), Err(e) => { if !(e.message().contains("not found")) { - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Can't delete namespace {}.", - qovery_namespace - )), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) } } } } - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); - match helm.list_release(None, &vec![]) { + match helm.list_release(None, &[]) { Ok(helm_charts) => { for chart in helm_charts { let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &vec![]) { - Ok(_) => self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - ), - ), + match helm.uninstall(&chart_info, &[]) { + Ok(_) => self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - ), - ) + let message_safe = format!("Error deleting chart `{}`", chart.name); + self.logger().log(EngineEvent::Warning( + event_details.clone(), + 
EventMessage::new(message_safe, Some(e.to_string())), + )) } } } } Err(e) => { let message_safe = "Unable to get helm list"; - self.logger().log( - LogLevel::Error, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - ), - ) + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) } } }; let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); self.send_to_customer(&message, &listeners_helper); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message)), - ); + self.logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); match retry::retry(Fibonacci::from_millis(60000).take(3), || { match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { @@ -1676,34 +1466,28 @@ impl Kapsule { format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); Ok(()) } Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, error, )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details.clone(), + event_details, CommandError::new(msg, None), )), } } fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log( - LogLevel::Warning, - EngineEvent::Deleting( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("SCW.delete_error() called.".to_string()), - ), - ); + self.logger().log(EngineEvent::Warning( + self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe("SCW.delete_error() called.".to_string()), + )); Ok(()) } @@ -1778,7 +1562,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create()) @@ -1792,7 +1576,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.create_error()) @@ -1810,13 +1594,10 @@ impl Kubernetes for Kapsule { .as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Start preparing SCW cluster upgrade process".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing SCW cluster upgrade process".to_string()), + )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1830,7 +1611,7 @@ impl Kubernetes for Kapsule { self.cloud_provider().credentials_environment_variables(), Stage::Infrastructure(InfrastructureStep::Upgrade), ) { - self.logger().log(LogLevel::Error, EngineEvent::Error(e.clone(), 
None)); + self.logger().log(EngineEvent::Error(e.clone(), None)); return Err(e); } @@ -1841,13 +1622,10 @@ impl Kubernetes for Kapsule { format!("Preparing nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Preparing nodes for upgrade for Kubernetes cluster.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing nodes for upgrade for Kubernetes cluster.".to_string()), + )); context.insert( "kubernetes_cluster_version", @@ -1860,9 +1638,9 @@ impl Kubernetes for Kapsule { context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, self.template_directory.to_string(), - temp_dir.to_string(), + temp_dir, e, )); } @@ -1873,9 +1651,9 @@ impl Kubernetes for Kapsule { crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - common_bootstrap_charts.to_string(), - common_charts_temp_dir.to_string(), + event_details, + common_bootstrap_charts, + common_charts_temp_dir, e, )); } @@ -1884,13 +1662,10 @@ impl Kubernetes for Kapsule { format!("Upgrading Kubernetes {} nodes", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), - ), - ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes nodes.".to_string()), + )); match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { Ok(_) => match 
self.check_workers_on_upgrade(kubernetes_upgrade_status.requested_version.to_string()) { @@ -1899,29 +1674,21 @@ impl Kubernetes for Kapsule { format!("Kubernetes {} nodes have been successfully upgraded", self.name()).as_str(), &listeners_helper, ); - self.logger().log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes nodes have been successfully upgraded.".to_string(), - ), - ), - ); + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes nodes have been successfully upgraded.".to_string()), + )); } Err(e) => { return Err(EngineError::new_k8s_node_not_ready_with_requested_version( - event_details.clone(), + event_details, kubernetes_upgrade_status.requested_version.to_string(), e, )); } }, Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline( - event_details.clone(), - e, - )); + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } } @@ -1936,7 +1703,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade()) @@ -1950,7 +1717,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) @@ -1964,7 +1731,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade()) @@ -1978,7 +1745,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) @@ -1992,7 +1759,7 @@ impl 
Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause()) @@ -2006,7 +1773,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Pause, || self.pause_error()) @@ -2020,7 +1787,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete()) @@ -2034,7 +1801,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); send_progress_on_long_task(self, Action::Delete, || self.delete_error()) @@ -2090,7 +1857,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) @@ -2118,7 +1885,7 @@ impl Kubernetes for Kapsule { self.struct_name(), function_name!(), self.name(), - event_details.clone(), + event_details, self.logger(), ); Ok(()) diff --git a/src/cloud_provider/scaleway/kubernetes/node.rs b/src/cloud_provider/scaleway/kubernetes/node.rs index e1d85bd5..8cd5419c 100644 --- a/src/cloud_provider/scaleway/kubernetes/node.rs +++ b/src/cloud_provider/scaleway/kubernetes/node.rs @@ -82,7 +82,7 @@ impl FromStr for ScwInstancesType { "render-s" => Ok(ScwInstancesType::RenderS), _ => { let message = format!("`{}` instance type is not supported", s); - return Err(CommandError::new(message.clone(), Some(message))); + Err(CommandError::new(message.clone(), Some(message))) } } } diff --git a/src/cloud_provider/scaleway/mod.rs b/src/cloud_provider/scaleway/mod.rs index fa03ff8c..ceaf8c3c 100644 --- a/src/cloud_provider/scaleway/mod.rs +++ b/src/cloud_provider/scaleway/mod.rs @@ -4,9 +4,8 
@@ use uuid::Uuid; use crate::cloud_provider::{CloudProvider, EngineError, Kind, TerraformStateCredentials}; use crate::constants::{SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY}; use crate::events::{EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod kubernetes; pub mod router; diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index 6db22aca..f7c3cb95 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -4,7 +4,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Service, ServiceType, StatelessService, + Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -12,11 +12,11 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use crate::logger::Logger; use ::function_name::named; -pub struct Router { +pub struct RouterScw { context: Context, id: String, action: Action, @@ -29,7 +29,7 @@ pub struct Router { logger: Box, } -impl Router { +impl RouterScw { pub fn new( context: Context, id: &str, @@ -42,7 +42,7 @@ 
impl Router { listeners: Listeners, logger: Box, ) -> Self { - Router { + RouterScw { context, id: id.to_string(), name: name.to_string(), @@ -65,7 +65,7 @@ impl Router { } } -impl Service for Router { +impl Service for RouterScw { fn context(&self) -> &Context { &self.context } @@ -133,8 +133,8 @@ impl Service for Router { let mut context = default_tera_context(self, kubernetes, environment); let applications = environment - .stateless_services - .iter() + .stateless_services() + .into_iter() .filter(|x| x.service_type() == ServiceType::Application) .collect::>(); @@ -154,7 +154,7 @@ impl Service for Router { let route_data_templates = self .routes .iter() - .map(|r| { + .filter_map(|r| { match applications .iter() .find(|app| app.name() == r.application_name.as_str()) @@ -167,8 +167,6 @@ impl Service for Router { _ => None, } }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) .collect::>(); let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str()); @@ -194,16 +192,16 @@ impl Service for Router { Ok(context) } - fn selector(&self) -> Option { - Some(format!("routerId={}", self.id)) - } - fn logger(&self) -> &dyn Logger { &*self.logger } + + fn selector(&self) -> Option { + Some(format!("routerId={}", self.id)) + } } -impl crate::cloud_provider::service::Router for Router { +impl Router for RouterScw { fn domains(&self) -> Vec<&str> { let mut _domains = vec![self.default_domain.as_str()]; @@ -219,7 +217,7 @@ impl crate::cloud_provider::service::Router for Router { } } -impl Helm for Router { +impl Helm for RouterScw { fn helm_selector(&self) -> Option { self.selector() } @@ -241,7 +239,7 @@ impl Helm for Router { } } -impl Listen for Router { +impl Listen for RouterScw { fn listeners(&self) -> &Listeners { &self.listeners } @@ -251,15 +249,19 @@ impl Listen for Router { } } -impl StatelessService for Router {} +impl StatelessService for RouterScw { + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} 
-impl ToTransmitter for Router { +impl ToTransmitter for RouterScw { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id().to_string(), self.name().to_string()) } } -impl Create for Router { +impl Create for RouterScw { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); @@ -288,9 +290,9 @@ impl Create for Router { crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), - from_dir.to_string(), - workspace_dir.to_string(), + event_details, + from_dir, + workspace_dir, e, )); } @@ -315,7 +317,7 @@ impl Create for Router { self.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e)) } @@ -338,19 +340,16 @@ impl Create for Router { } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log( - LogLevel::Warning, - EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), + self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!( + "Invalid CNAME for {}. 
Might not be an issue if user is using a CDN.", + domain_to_check.domain, ), + Some(err.to_string()), ), - ); + )); } } } @@ -376,7 +375,7 @@ impl Create for Router { } } -impl Pause for Router { +impl Pause for RouterScw { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); @@ -410,7 +409,7 @@ impl Pause for Router { } } -impl Delete for Router { +impl Delete for RouterScw { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); @@ -422,7 +421,7 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, false, event_details) + delete_router(target, self, event_details) } fn on_delete_check(&self) -> Result<(), EngineError> { @@ -440,6 +439,6 @@ impl Delete for Router { event_details.clone(), self.logger(), ); - delete_router(target, self, true, event_details) + delete_router(target, self, event_details) } } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index f91b5422..b3111f51 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -7,7 +7,6 @@ use std::time::Duration; use tera::Context as TeraContext; -use crate::build_platform::Image; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::kubernetes::Kubernetes; @@ -21,12 +20,12 @@ use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replica use crate::cmd::structs::LabelsContent; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; -use crate::logger::{LogLevel, Logger}; -use crate::models::ProgressLevel::Info; -use crate::models::{ +use crate::io_models::ProgressLevel::Info; +use crate::io_models::{ 
Context, DatabaseMode, Listen, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; +use crate::logger::Logger; pub trait Service: ToTransmitter { fn context(&self) -> &Context; @@ -73,16 +72,12 @@ pub trait Service: ToTransmitter { fn min_instances(&self) -> u32; fn max_instances(&self) -> u32; fn publicly_accessible(&self) -> bool; - fn fqdn<'a>(&self, target: &DeploymentTarget, fqdn: &'a String, is_managed: bool) -> String { + fn fqdn(&self, target: &DeploymentTarget, fqdn: &str, is_managed: bool) -> String { match &self.publicly_accessible() { true => fqdn.to_string(), false => match is_managed { true => format!("{}-dns.{}.svc.cluster.local", self.id(), target.environment.namespace()), - false => format!( - "{}.{}.svc.cluster.local", - self.sanitized_name(), - target.environment.namespace() - ), + false => format!("{}.{}.svc.cluster.local", self.sanitized_name(), target.environment.namespace()), }, } } @@ -119,6 +114,7 @@ pub trait Service: ToTransmitter { } pub trait StatelessService: Service + Create + Pause + Delete { + fn as_stateless_service(&self) -> &dyn StatelessService; fn exec_action(&self, deployment_target: &DeploymentTarget) -> Result<(), EngineError> { match self.action() { crate::cloud_provider::service::Action::Create => self.on_create(deployment_target), @@ -139,6 +135,7 @@ pub trait StatelessService: Service + Create + Pause + Delete { } pub trait StatefulService: Service + Create + Pause + Delete { + fn as_stateful_service(&self) -> &dyn StatefulService; fn exec_action(&self, deployment_target: &DeploymentTarget) -> Result<(), EngineError> { match self.action() { crate::cloud_provider::service::Action::Create => self.on_create(deployment_target), @@ -160,11 +157,6 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait Application: StatelessService { - fn image(&self) -> &Image; - fn set_image(&mut self, image: Image); -} - pub trait Router: 
StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; fn has_custom_domains(&self) -> bool; @@ -295,7 +287,7 @@ impl<'a> ServiceType<'a> { impl<'a> ToString for ServiceType<'a> { fn to_string(&self) -> String { - self.name().to_string() + self.name() } } @@ -310,20 +302,17 @@ where { let kubernetes = deployment_target.kubernetes; let environment = deployment_target.environment; - match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details.clone()) { + match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details) { Ok(lines) => lines, Err(err) => { - logger.log( - LogLevel::Error, - EngineEvent::Error( - err, - Some(EventMessage::new_from_safe(format!( - "error while retrieving debug logs from {} {}", - service.service_type().name(), - service.name_with_id(), - ))), - ), - ); + logger.log(EngineEvent::Error( + err, + Some(EventMessage::new_from_safe(format!( + "error while retrieving debug logs from {} {}", + service.service_type().name(), + service.name_with_id(), + ))), + )); Vec::new() } @@ -354,8 +343,8 @@ pub fn default_tera_context( context.insert("max_instances", &service.max_instances()); context.insert("is_private_port", &service.private_port().is_some()); - if service.private_port().is_some() { - context.insert("private_port", &service.private_port().unwrap()); + if let Some(private_port) = service.private_port() { + context.insert("private_port", &private_port); } context.insert("version", &service.version()); @@ -389,9 +378,9 @@ where tera_context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -439,19 +428,19 @@ where service.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e))?; 
crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry( kubernetes_config_file_path.as_str(), environment.namespace(), - service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ) .map_err(|e| { EngineError::new_k8s_pod_not_ready( event_details.clone(), - service.selector().unwrap_or("".to_string()), + service.selector().unwrap_or_default(), environment.namespace().to_string(), e, ) @@ -521,13 +510,13 @@ pub fn scale_down_application( kubernetes.cloud_provider().credentials_environment_variables(), environment.namespace(), scaling_kind, - service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), replicas_count as u32, ) .map_err(|e| { EngineError::new_k8s_scale_replicas( event_details.clone(), - service.selector().unwrap_or("".to_string()), + service.selector().unwrap_or_default(), environment.namespace().to_string(), replicas_count as u32, e, @@ -535,24 +524,18 @@ pub fn scale_down_application( }) } -pub fn delete_router( - target: &DeploymentTarget, - service: &T, - is_error: bool, - event_details: EventDetails, -) -> Result<(), EngineError> +pub fn delete_router(target: &DeploymentTarget, service: &T, event_details: EventDetails) -> Result<(), EngineError> where T: Router, { send_progress_on_long_task(service, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, service, is_error, event_details.clone()) + delete_stateless_service(target, service, event_details.clone()) }) } pub fn delete_stateless_service( target: &DeploymentTarget, service: &T, - is_error: bool, event_details: EventDetails, ) -> Result<(), EngineError> where @@ -562,22 +545,8 @@ where let environment = target.environment; let helm_release_name = service.helm_release_name(); - if is_error { - let _ = get_stateless_resource_information( - kubernetes, - environment, - 
service.selector().unwrap_or("".to_string()).as_str(), - Stage::Environment(EnvironmentStep::Delete), - )?; - } - // clean the resource - let _ = helm_uninstall_release( - kubernetes, - environment, - helm_release_name.as_str(), - event_details.clone(), - )?; + let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str(), event_details)?; Ok(()) } @@ -596,17 +565,14 @@ where let environment = target.environment; if service.is_managed_service() { - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Deploying managed {} `{}`", - service.service_type().name(), - service.name_with_id() - )), - ), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Deploying managed {} `{}`", + service.service_type().name(), + service.name_with_id() + )), + )); let context = service.tera_context(target)?; @@ -616,9 +582,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_common_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -629,9 +595,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -640,12 +606,12 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), external_svc_dir.as_str(), - context.clone(), + context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + external_svc_dir, e, )); } @@ -657,17 +623,14 @@ where .map_err(|e| 
EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e))?; } else { // use helm - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Deploying containerized {} `{}` on Kubernetes cluster", - service.service_type().name(), - service.name_with_id() - )), - ), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Deploying containerized {} `{}` on Kubernetes cluster", + service.service_type().name(), + service.name_with_id() + )), + )); let context = service.tera_context(target)?; let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; @@ -679,9 +642,9 @@ where context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -690,12 +653,12 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_values_dir(), workspace_dir.as_str(), - context.clone(), + context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_values_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -712,7 +675,7 @@ where // create a namespace with labels if it does not exist crate::cmd::kubectl::kubectl_exec_create_namespace( - kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, environment.namespace(), namespace_labels, kubernetes.cloud_provider().credentials_environment_variables(), @@ -740,14 +703,14 @@ where service.selector(), ); - helm.upgrade(&chart, &vec![]) + helm.upgrade(&chart, &[]) .map_err(|e| helm::to_engine_error(&event_details, e))?; // check app status let is_pod_ready = crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry( &kubernetes_config_file_path, environment.namespace(), - 
service.selector().unwrap_or("".to_string()).as_str(), + service.selector().unwrap_or_default().as_str(), kubernetes.cloud_provider().credentials_environment_variables(), ); if let Ok(Some(true)) = is_pod_ready { @@ -755,7 +718,7 @@ where } return Err(EngineError::new_database_failed_to_start_after_several_retries( - event_details.clone(), + event_details, service.name_with_id(), service.service_type().name(), match is_pod_ready { @@ -789,9 +752,9 @@ where tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_common_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -802,9 +765,9 @@ where tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.terraform_resource_dir_path(), - workspace_dir.to_string(), + workspace_dir, e, )); } @@ -812,13 +775,13 @@ where let external_svc_dir = format!("{}/{}", workspace_dir, "external-name-svc"); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + &external_svc_dir, tera_context.clone(), ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - external_svc_dir.to_string(), + external_svc_dir, e, )); } @@ -826,33 +789,29 @@ where if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( service.helm_chart_external_name_service_dir(), workspace_dir.as_str(), - tera_context.clone(), + tera_context, ) { return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details.clone(), + event_details, service.helm_chart_external_name_service_dir(), - workspace_dir.to_string(), + workspace_dir, e, )); } match 
crate::cmd::terraform::terraform_init_validate_destroy(workspace_dir.as_str(), true) { Ok(_) => { - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - event_details.clone(), - EventMessage::new_from_safe("Deleting secret containing tfstates".to_string()), - ), - ); + logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Deleting secret containing tfstates".to_string()), + )); let _ = delete_terraform_tfstate_secret(kubernetes, environment.namespace(), &get_tfstate_name(service)); } Err(e) => { - let engine_err = - EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details.clone(), e); + let engine_err = EngineError::new_terraform_error_while_executing_destroy_pipeline(event_details, e); - logger.log(LogLevel::Error, EngineEvent::Error(engine_err.clone(), None)); + logger.log(EngineEvent::Error(engine_err.clone(), None)); return Err(engine_err); } @@ -861,12 +820,7 @@ where // If not managed, we use helm to deploy let helm_release_name = service.helm_release_name(); // clean the resource - let _ = helm_uninstall_release( - kubernetes, - environment, - helm_release_name.as_str(), - event_details.clone(), - )?; + let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str(), event_details)?; } Ok(()) @@ -921,10 +875,10 @@ where version.as_str() ); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); let progress_info = ProgressInfo::new( service.progress_scope(), @@ -939,10 +893,10 @@ where VersionsNumber::from_str(&service.version()).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), service.version(), e) })?, - VersionsNumber::from_str(&version.to_string()).map_err(|e| { + VersionsNumber::from_str(&version).map_err(|e| { 
EngineError::new_version_number_parsing_error(event_details.clone(), version.to_string(), e) })?, - Some(message.to_string()), + Some(message), )); } @@ -950,7 +904,7 @@ where VersionsNumber::from_str(&service.version()).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), service.version(), e) })?, - VersionsNumber::from_str(&version.to_string()).map_err(|e| { + VersionsNumber::from_str(&version).map_err(|e| { EngineError::new_version_number_parsing_error(event_details.clone(), version.to_string(), e) })?, None, @@ -973,12 +927,12 @@ where listeners_helper.deployment_error(progress_info); let error = EngineError::new_unsupported_version_error( - event_details.clone(), + event_details, service.service_type().name(), service.version(), ); - logger.log(LogLevel::Error, EngineEvent::Error(error.clone(), None)); + logger.log(EngineEvent::Error(error.clone(), None)); Err(error) } @@ -1012,7 +966,7 @@ pub enum CheckAction { pub fn check_kubernetes_service_error( result: Result<(), EngineError>, kubernetes: &dyn Kubernetes, - service: &Box, + service: &T, event_details: EventDetails, logger: &dyn Logger, deployment_target: &DeploymentTarget, @@ -1040,24 +994,15 @@ where match action { CheckAction::Deploy => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } CheckAction::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } CheckAction::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - 
EngineEvent::Deleting(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); } } @@ -1076,19 +1021,16 @@ where kubernetes.context().execution_id(), ); - logger.log( - LogLevel::Error, - EngineEvent::Error( - err.clone(), - Some(EventMessage::new_from_safe(format!( - "{} error with {} {} , id: {}", - action_verb, - service.service_type().name(), - service.name(), - service.id(), - ))), - ), - ); + logger.log(EngineEvent::Error( + err.clone(), + Some(EventMessage::new_from_safe(format!( + "{} error with {} {} , id: {}", + action_verb, + service.service_type().name(), + service.name(), + service.id(), + ))), + )); match action { CheckAction::Deploy => listeners_helper.deployment_error(progress_info), @@ -1110,10 +1052,10 @@ where kubernetes.context().execution_id(), ); - logger.log( - LogLevel::Debug, - EngineEvent::Debug(event_details.clone(), EventMessage::new_from_safe(debug_logs_string)), - ); + logger.log(EngineEvent::Debug( + event_details.clone(), + EventMessage::new_from_safe(debug_logs_string), + )); match action { CheckAction::Deploy => listeners_helper.deployment_error(progress_info), @@ -1121,10 +1063,10 @@ where CheckAction::Delete => listeners_helper.delete_error(progress_info), } - return Err(EngineError::new_k8s_service_issue( - event_details.clone(), + Err(EngineError::new_k8s_service_issue( + event_details, CommandError::new(err.message(), Some("Error with Kubernetes service".to_string())), - )); + )) } _ => { let progress_info = ProgressInfo::new( @@ -1164,13 +1106,13 @@ pub fn get_stateless_resource_information_for_user( where T: Service + ?Sized, { - let selector = service.selector().unwrap_or("".to_string()); + let selector = service.selector().unwrap_or_default(); let mut result = Vec::with_capacity(50); let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; // get logs let logs = 
crate::cmd::kubectl::kubectl_exec_logs( - kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, environment.namespace(), selector.as_str(), kubernetes.cloud_provider().credentials_environment_variables(), @@ -1188,7 +1130,7 @@ where // get pod state let pods = crate::cmd::kubectl::kubectl_exec_get_pods( - kubernetes_config_file_path.to_string(), + &kubernetes_config_file_path, Some(environment.namespace()), Some(selector.as_str()), kubernetes.cloud_provider().credentials_environment_variables(), @@ -1252,52 +1194,6 @@ where Ok(result) } -/// show different output (kubectl describe, log..) for debug purpose -pub fn get_stateless_resource_information( - kubernetes: &dyn Kubernetes, - environment: &Environment, - selector: &str, - stage: Stage, -) -> Result<(Describe, Logs), EngineError> { - let event_details = kubernetes.get_event_details(stage); - let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - // exec describe pod... - let describe = crate::cmd::kubectl::kubectl_exec_describe_pod( - kubernetes_config_file_path.to_string(), - environment.namespace(), - selector, - kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| { - EngineError::new_k8s_describe( - event_details.clone(), - selector.to_string(), - environment.namespace().to_string(), - e, - ) - })?; - - // exec logs... - let logs = crate::cmd::kubectl::kubectl_exec_logs( - kubernetes_config_file_path.to_string(), - environment.namespace(), - selector, - kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| { - EngineError::new_k8s_get_logs_error( - event_details.clone(), - selector.to_string(), - environment.namespace().to_string(), - e, - ) - })? 
- .join("\n"); - - Ok((describe, logs)) -} - pub fn helm_uninstall_release( kubernetes: &dyn Kubernetes, environment: &Environment, @@ -1313,7 +1209,7 @@ pub fn helm_uninstall_release( .map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?; let chart = ChartInfo::new_from_release_name(helm_release_name, environment.namespace()); - helm.uninstall(&chart, &vec![]) + helm.uninstall(&chart, &[]) .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } @@ -1358,9 +1254,7 @@ where S: Service + Listen, F: Fn() -> R, { - let event_details = service - .get_event_details(Stage::Environment(EnvironmentStep::Deploy)) - .clone(); + let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); let logger = service.logger().clone_dyn(); let listeners = std::clone::Clone::clone(service.listeners()); @@ -1381,7 +1275,7 @@ where let listeners_helper = ListenersHelper::new(&listeners); let action = action; let progress_info = progress_info; - let waiting_message = waiting_message.clone().unwrap_or("No message...".to_string()); + let waiting_message = waiting_message.clone().unwrap_or_else(|| "No message...".to_string()); loop { // do notify users here @@ -1392,42 +1286,33 @@ where match action { Action::Create => { listeners_helper.deployment_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deploying( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Deploy), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Environment(EnvironmentStep::Deploy), ), - ); + event_message, + )); } Action::Pause => { listeners_helper.pause_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Pausing( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Pause), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + 
event_details, + Stage::Environment(EnvironmentStep::Pause), ), - ); + event_message, + )); } Action::Delete => { listeners_helper.delete_in_progress(progress_info); - logger.log( - LogLevel::Info, - EngineEvent::Deleting( - EventDetails::clone_changing_stage( - event_details, - Stage::Environment(EnvironmentStep::Delete), - ), - event_message, + logger.log(EngineEvent::Info( + EventDetails::clone_changing_stage( + event_details, + Stage::Environment(EnvironmentStep::Delete), ), - ); + event_message, + )); } Action::Nothing => {} // should not happens }; diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 97a74f99..4c51ac0c 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -1,9 +1,11 @@ +#![allow(clippy::field_reassign_with_default)] + use std::collections::HashMap; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventDetails, EventMessage}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; +use crate::io_models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; +use crate::logger::Logger; use chrono::Duration; use core::option::Option::{None, Some}; use core::result::Result; @@ -12,6 +14,7 @@ use retry::delay::Fixed; use retry::OperationResult; use serde::{Deserialize, Serialize}; use std::fmt; +use std::fmt::Write; use std::str::FromStr; use trust_dns_resolver::config::*; use trust_dns_resolver::proto::rr::{RData, RecordType}; @@ -167,10 +170,7 @@ pub fn generate_supported_version( if minor_min == minor_max { // add short minor format targeting latest version - supported_versions.insert( - format!("{}.{}", major.to_string(), minor_max.to_string()), - latest_major_version.clone(), - ); + supported_versions.insert(format!("{}.{}", major, minor_max), latest_major_version.clone()); if update_min.unwrap() == update_max.unwrap() { let version = 
format!("{}.{}.{}", major, minor_min, update_min.unwrap()); supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); @@ -184,13 +184,8 @@ pub fn generate_supported_version( for minor in minor_min..minor_max + 1 { // add short minor format targeting latest version supported_versions.insert( - format!("{}.{}", major.to_string(), minor.to_string()), - format!( - "{}.{}.{}", - major.to_string(), - minor.to_string(), - update_max.unwrap().to_string() - ), + format!("{}.{}", major, minor), + format!("{}.{}.{}", major, minor, update_max.unwrap()), ); if update_min.unwrap() == update_max.unwrap() { let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); @@ -240,22 +235,6 @@ impl VersionsNumber { } } - pub fn to_string(&self) -> String { - let mut version = vec![self.major.to_string()]; - - if self.minor.is_some() { - version.push(self.minor.clone().unwrap()) - } - if self.patch.is_some() { - version.push(self.patch.clone().unwrap()) - } - if self.suffix.is_some() { - version.push(self.suffix.clone().unwrap()) - } - - version.join(".") - } - pub fn to_major_version_string(&self) -> String { self.major.clone() } @@ -276,9 +255,7 @@ impl FromStr for VersionsNumber { fn from_str(version: &str) -> Result { if version.trim() == "" { - return Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )); + return Err(CommandError::new_from_safe_message("version cannot be empty".to_string())); } let mut version_split = version.splitn(4, '.').map(|v| v.trim()); @@ -286,7 +263,7 @@ impl FromStr for VersionsNumber { let major = match version_split.next() { Some(major) => { let major = major.to_string(); - major.replace("v", "") + major.replace('v', "") } None => { return Err(CommandError::new_from_safe_message(format!( @@ -298,7 +275,7 @@ impl FromStr for VersionsNumber { let minor = version_split.next().map(|minor| { let minor = minor.to_string(); - minor.replace("+", "") + minor.replace('+', "") }); let patch = 
version_split.next().map(|patch| patch.to_string()); @@ -314,7 +291,24 @@ impl FromStr for VersionsNumber { impl fmt::Display for VersionsNumber { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.to_string()) + f.write_str(&self.major)?; + + if let Some(minor) = &self.minor { + f.write_char('.')?; + f.write_str(minor)?; + } + + if let Some(patch) = &self.patch { + f.write_char('.')?; + f.write_str(patch)?; + } + + if let Some(suffix) = &self.suffix { + f.write_char('.')?; + f.write_str(suffix)?; + } + + Ok(()) } } @@ -405,10 +399,7 @@ pub fn check_cname_for( match get_cname_record_value(next_resolver(), cname_to_check) { Some(domain) => OperationResult::Ok(domain), None => { - let msg = format!( - "Cannot find domain under CNAME {}. Retrying in 5 seconds...", - cname_to_check - ); + let msg = format!("Cannot find domain under CNAME {}. Retrying in 5 seconds...", cname_to_check); send_deployment_progress(msg.as_str()); OperationResult::Retry(msg) } @@ -464,10 +455,10 @@ pub fn check_domain_for( resolver }; - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); let fixed_iterable = Fixed::from_millis(3000).take(100); let check_result = retry::retry(fixed_iterable, || match next_resolver().lookup_ip(domain) { @@ -475,10 +466,10 @@ pub fn check_domain_for( Err(err) => { let x = format!("Domain resolution check for '{}' is still in progress...", domain); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(x.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(x.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -497,10 +488,10 @@ pub fn check_domain_for( Ok(_) => { let x = format!("Domain {} 
is ready! ⚡️", domain); - logger.log( - LogLevel::Info, - EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -518,10 +509,10 @@ pub fn check_domain_for( domain ); - logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details.clone(), EventMessage::new_from_safe(message.to_string())), - ); + logger.log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); listener_helper.deployment_in_progress(ProgressInfo::new( ProgressScope::Environment { @@ -539,12 +530,12 @@ pub fn check_domain_for( } pub fn sanitize_name(prefix: &str, name: &str) -> String { - format!("{}-{}", prefix, name).replace("_", "-") + format!("{}-{}", prefix, name).replace('_', "-") } pub fn managed_db_name_sanitizer(max_size: usize, prefix: &str, name: &str) -> String { let max_size = max_size - prefix.len(); - let mut new_name = format!("{}{}", prefix, name.replace("_", "").replace("-", "")); + let mut new_name = format!("{}{}", prefix, name.replace('_', "").replace('-', "")); if new_name.chars().count() > max_size { new_name = new_name[..max_size].to_string(); } @@ -559,19 +550,10 @@ pub fn print_action( event_details: EventDetails, logger: &dyn Logger, ) { - let msg = format!( - "{}.{}.{} called for {}", - cloud_provider_name, struct_name, fn_name, item_name - ); + let msg = format!("{}.{}.{} called for {}", cloud_provider_name, struct_name, fn_name, item_name); match fn_name.contains("error") { - true => logger.log( - LogLevel::Warning, - EngineEvent::Warning(event_details, EventMessage::new_from_safe(msg)), - ), - false => logger.log( - LogLevel::Info, - EngineEvent::Info(event_details, EventMessage::new_from_safe(msg)), - ), + true => logger.log(EngineEvent::Warning(event_details, 
EventMessage::new_from_safe(msg))), + false => logger.log(EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))), } } @@ -601,23 +583,17 @@ mod tests { let test_cases = vec![ TestCase { input: "", - expected_output: Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "empty version str", }, TestCase { input: " ", - expected_output: Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "version a tab str", }, TestCase { input: " ", - expected_output: Err(CommandError::new_from_safe_message( - "version cannot be empty".to_string(), - )), + expected_output: Err(CommandError::new_from_safe_message("version cannot be empty".to_string())), description: "version as a space str", }, TestCase { diff --git a/src/cmd/command.rs b/src/cmd/command.rs index 38508699..83cd74a8 100644 --- a/src/cmd/command.rs +++ b/src/cmd/command.rs @@ -5,18 +5,11 @@ use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; use crate::cmd::command::CommandError::{ExecutionError, ExitStatusError, Killed, TimeoutError}; -use crate::cmd::command::CommandOutputType::{STDERR, STDOUT}; -use chrono::Duration; use itertools::Itertools; -use std::time::Instant; +use std::time::{Duration, Instant}; use timeout_readwrite::TimeoutReader; -enum CommandOutputType { - STDOUT(Result), - STDERR(Result), -} - #[derive(thiserror::Error, Debug)] pub enum CommandError { #[error("Error while executing command")] @@ -32,6 +25,69 @@ pub enum CommandError { Killed(String), } +#[derive(Debug, Clone)] +pub enum AbortReason { + Timeout(Duration), + Canceled(String), +} +pub struct CommandKiller<'a> { + should_abort: Box Option + 'a>, +} + +impl<'a> CommandKiller<'a> { + pub fn never() -> 
CommandKiller<'a> { + CommandKiller { + should_abort: Box::new(|| None), + } + } + + pub fn from_timeout(timeout: Duration) -> CommandKiller<'a> { + let now = Instant::now(); + CommandKiller { + should_abort: Box::new(move || { + if now.elapsed() >= timeout { + return Some(AbortReason::Timeout(timeout)); + } + + None + }), + } + } + + pub fn from_cancelable(is_canceled: &'a dyn Fn() -> bool) -> CommandKiller<'a> { + CommandKiller { + should_abort: Box::new(move || { + if is_canceled() { + return Some(AbortReason::Canceled("Task canceled".to_string())); + } + None + }), + } + } + + pub fn from(timeout: Duration, is_canceled: &'a dyn Fn() -> bool) -> CommandKiller<'a> { + let has_timeout = Self::from_timeout(timeout); + let is_canceled = Self::from_cancelable(is_canceled); + CommandKiller { + should_abort: Box::new(move || { + if let Some(reason) = (has_timeout.should_abort)() { + return Some(reason); + } + + if let Some(reason) = (is_canceled.should_abort)() { + return Some(reason); + } + + None + }), + } + } + + pub fn should_abort(&self) -> Option { + (self.should_abort)() + } +} + pub struct QoveryCommand { command: Command, } @@ -61,52 +117,34 @@ impl QoveryCommand { pub fn exec(&mut self) -> Result<(), CommandError> { self.exec_with_abort( - Duration::max_value(), - |line| info!("{}", line), - |line| warn!("{}", line), - || false, + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), + &CommandKiller::never(), ) } pub fn exec_with_output( &mut self, - stdout_output: STDOUT, - stderr_output: STDERR, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, ) -> Result<(), CommandError> where STDOUT: FnMut(String), STDERR: FnMut(String), { - self.exec_with_abort(Duration::max_value(), stdout_output, stderr_output, || false) + self.exec_with_abort(stdout_output, stderr_output, &CommandKiller::never()) } - pub fn exec_with_timeout( + pub fn exec_with_abort( &mut self, - timeout: Duration, - stdout_output: STDOUT, - stderr_output: STDERR, + 
stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, + abort_notifier: &CommandKiller, ) -> Result<(), CommandError> where STDOUT: FnMut(String), STDERR: FnMut(String), { - self.exec_with_abort(timeout, stdout_output, stderr_output, || false) - } - - pub fn exec_with_abort( - &mut self, - timeout: Duration, - mut stdout_output: STDOUT, - mut stderr_output: STDERR, - should_be_killed: F, - ) -> Result<(), CommandError> - where - STDOUT: FnMut(String), - STDERR: FnMut(String), - F: Fn() -> bool, - { - assert!(timeout.num_seconds() > 0, "Timeout cannot be a 0 or negative duration"); - info!("command: {:?}", self.command); let mut cmd_handle = self .command @@ -115,44 +153,88 @@ impl QoveryCommand { .spawn() .map_err(ExecutionError)?; - let process_start_time = Instant::now(); - // Read stdout/stderr until timeout is reached - let reader_timeout = std::time::Duration::from_secs(10.min(timeout.num_seconds() as u64)); - let stdout = cmd_handle.stdout.take().ok_or(ExecutionError(Error::new( - ErrorKind::BrokenPipe, - "Cannot get stdout for command", - )))?; - let stdout_reader = BufReader::new(TimeoutReader::new(stdout, reader_timeout)) - .lines() - .map(STDOUT); + let reader_timeout = std::time::Duration::from_secs(1); + let stdout = cmd_handle + .stdout + .take() + .ok_or_else(|| ExecutionError(Error::new(ErrorKind::BrokenPipe, "Cannot get stdout for command")))?; + let mut stdout_reader = BufReader::new(TimeoutReader::new(stdout, reader_timeout)).lines(); - let stderr = cmd_handle.stderr.take().ok_or(ExecutionError(Error::new( - ErrorKind::BrokenPipe, - "Cannot get stderr for command", - )))?; - let stderr_reader = BufReader::new(TimeoutReader::new( + let stderr = cmd_handle + .stderr + .take() + .ok_or_else(|| ExecutionError(Error::new(ErrorKind::BrokenPipe, "Cannot get stderr for command")))?; + let mut stderr_reader = BufReader::new(TimeoutReader::new( stderr, std::time::Duration::from_secs(0), // don't block on stderr )) - .lines() - .map(STDERR); + .lines(); 
- for line in stdout_reader.interleave(stderr_reader) { - match line { - STDOUT(Err(ref err)) | STDERR(Err(ref err)) if err.kind() == ErrorKind::TimedOut => {} - STDOUT(Ok(line)) => stdout_output(line), - STDERR(Ok(line)) => stderr_output(line), - STDOUT(Err(err)) => error!("Error on stdout of cmd {:?}: {:?}", self.command, err), - STDERR(Err(err)) => error!("Error on stderr of cmd {:?}: {:?}", self.command, err), - } - - if should_be_killed() { + let mut stdout_closed = false; + let mut stderr_closed = false; + while !stdout_closed || !stderr_closed { + // We should abort and kill the process + if abort_notifier.should_abort().is_some() { break; } - if (process_start_time.elapsed().as_secs() as i64) >= timeout.num_seconds() { - break; + // Read on stdout first + while !stdout_closed { + let line = match stdout_reader.next() { + Some(line) => line, + None => { + // Stdout has been closed + stdout_closed = true; + break; + } + }; + + match line { + Err(ref err) if err.kind() == ErrorKind::TimedOut => break, + Ok(line) => stdout_output(line), + Err(err) => { + error!("Error on stdout of cmd {:?}: {:?}", self.command, err); + stdout_closed = true; + break; + } + } + + // Should we abort and kill the process + if abort_notifier.should_abort().is_some() { + stdout_closed = true; + stderr_closed = true; + break; + } + } + + // Read stderr now + while !stderr_closed { + let line = match stderr_reader.next() { + Some(line) => line, + None => { + // Stdout has been closed + stderr_closed = true; + break; + } + }; + + match line { + Err(ref err) if err.kind() == ErrorKind::TimedOut => break, + Ok(line) => stderr_output(line), + Err(err) => { + error!("Error on stderr of cmd {:?}: {:?}", self.command, err); + stderr_closed = true; + break; + } + } + + // should we abort and kill the process + if abort_notifier.should_abort().is_some() { + stdout_closed = true; + stderr_closed = true; + break; + } } } @@ -167,23 +249,24 @@ impl QoveryCommand { } Ok(None) => { // Does the 
process should be killed ? - if should_be_killed() { - let msg = format!("Killing process {:?}", self.command); - warn!("{}", msg); - Self::kill(&mut cmd_handle); - return Err(Killed(msg)); - } - - // Does the timeout has been reached ? - if (process_start_time.elapsed().as_secs() as i64) >= timeout.num_seconds() { - let msg = format!( - "Killing process {:?} due to timeout {}m reached", - self.command, - timeout.num_minutes() - ); - warn!("{}", msg); - Self::kill(&mut cmd_handle); - return Err(TimeoutError(msg)); + match abort_notifier.should_abort() { + None => {} + Some(AbortReason::Timeout(timeout)) => { + let msg = format!( + "Killing process {:?} due to timeout {}s reached", + self.command, + timeout.as_secs() + ); + warn!("{}", msg); + Self::kill(&mut cmd_handle); + return Err(TimeoutError(msg)); + } + Some(AbortReason::Canceled(_)) => { + let msg = format!("Killing process {:?}", self.command); + warn!("{}", msg); + Self::kill(&mut cmd_handle); + return Err(Killed(msg)); + } } } Err(err) => return Err(ExecutionError(err)), @@ -208,11 +291,10 @@ impl QoveryCommand { // return the output of "binary_name" --version pub fn run_version_command_for(binary_name: &str) -> String { let mut output_from_cmd = String::new(); - let mut cmd = QoveryCommand::new(binary_name, &vec!["--version"], Default::default()); - let _ = cmd.exec_with_output( - |r_out| output_from_cmd.push_str(&r_out), - |r_err| error!("Error executing {}: {}", binary_name, r_err), - ); + let mut cmd = QoveryCommand::new(binary_name, &["--version"], Default::default()); + let _ = cmd.exec_with_output(&mut |r_out| output_from_cmd.push_str(&r_out), &mut |r_err| { + error!("Error executing {}: {}", binary_name, r_err) + }); output_from_cmd } @@ -240,10 +322,10 @@ where #[cfg(test)] mod tests { - use crate::cmd::command::{does_binary_exist, run_version_command_for, CommandError, QoveryCommand}; - use chrono::Duration; + use crate::cmd::command::{does_binary_exist, run_version_command_for, CommandError, 
CommandKiller, QoveryCommand}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Barrier}; + use std::time::Duration; use std::{thread, time}; #[test] @@ -257,36 +339,36 @@ mod tests { fn test_run_version_for_command() { let ret = run_version_command_for("ls"); assert_eq!(ret.is_empty(), false); - assert_eq!(ret.contains("GNU"), true) + assert!(ret.contains("GNU")) } #[test] fn test_error() { - let mut cmd = QoveryCommand::new("false", &vec![], &vec![]); + let mut cmd = QoveryCommand::new("false", &[], &[]); assert_eq!(cmd.exec().is_err(), true); - assert_eq!(matches!(cmd.exec(), Err(CommandError::ExitStatusError(_))), true); + assert!(matches!(cmd.exec(), Err(CommandError::ExitStatusError(_)))); } #[test] fn test_command_with_timeout() { - let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); + let mut cmd = QoveryCommand::new("sleep", &["120"], &[]); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); - let mut cmd = QoveryCommand::new("sh", &vec!["-c", "cat /dev/urandom | grep -a --null-data ."], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); + let mut cmd = QoveryCommand::new("sh", &["-c", "cat /dev/urandom | grep -a --null-data ."], &[]); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); assert!(matches!(ret, Err(CommandError::TimeoutError(_)))); - let mut cmd = QoveryCommand::new("sleep", &vec!["1"], &vec![]); - let ret = cmd.exec_with_timeout(Duration::seconds(2), |_| {}, |_| {}); - assert_eq!(ret.is_ok(), true); + let mut cmd = QoveryCommand::new("sleep", &["1"], &[]); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &CommandKiller::from_timeout(Duration::from_secs(2))); + assert!(ret.is_ok()); } #[test] fn 
test_command_with_abort() { - let mut cmd = QoveryCommand::new("sleep", &vec!["120"], &vec![]); + let mut cmd = QoveryCommand::new("sleep", &["120"], &[]); let should_kill = Arc::new(AtomicBool::new(false)); let should_kill2 = should_kill.clone(); let barrier = Arc::new(Barrier::new(2)); @@ -301,8 +383,9 @@ mod tests { }); let cmd_killer = move || should_kill2.load(Ordering::Acquire); + let cmd_killer = CommandKiller::from_cancelable(&cmd_killer); barrier.wait(); - let ret = cmd.exec_with_abort(Duration::max_value(), |_| {}, |_| {}, cmd_killer); + let ret = cmd.exec_with_abort(&mut |_| {}, &mut |_| {}, &cmd_killer); assert!(matches!(ret, Err(CommandError::Killed(_)))); } diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs new file mode 100644 index 00000000..1d3d37d8 --- /dev/null +++ b/src/cmd/docker.rs @@ -0,0 +1,682 @@ +use crate::cmd::command::{CommandError, CommandKiller, QoveryCommand}; +use lazy_static::lazy_static; +use std::path::Path; +use std::process::ExitStatus; +use std::sync::Mutex; +use url::Url; + +#[derive(thiserror::Error, Debug)] +pub enum DockerError { + #[error("Docker Invalid configuration: {0}")] + InvalidConfig(String), + + #[error("Docker terminated with an unknown error: {0}")] + ExecutionError(#[from] std::io::Error), + + #[error("Docker terminated with a non success exit status code: {0}")] + ExitStatusError(ExitStatus), + + #[error("Docker aborted due to user cancel request: {0}")] + Aborted(String), + + #[error("Docker command terminated due to timeout: {0}")] + Timeout(String), +} + +lazy_static! 
{ + // Docker login when launched in parallel can mess up ~/.docker/config.json + // We use a mutex that will force serialization of logins in order to avoid that + // Mostly use for CI/Test when all test start in parallel and it the login phase at the same time + static ref LOGIN_LOCK: Mutex<()> = Mutex::new(()); +} + +#[derive(Debug)] +pub struct ContainerImage { + pub registry: Url, + pub name: String, + pub tags: Vec, +} + +impl ContainerImage { + pub fn image_names(&self) -> Vec { + let host = if let Some(port) = self.registry.port() { + format!("{}:{}", self.registry.host_str().unwrap_or_default(), port) + } else { + self.registry.host_str().unwrap_or_default().to_string() + }; + + self.tags + .iter() + .map(|tag| format!("{}/{}:{}", host, &self.name, tag)) + .collect() + } + + pub fn image_name(&self) -> String { + self.image_names().remove(0) + } +} + +#[derive(Debug, Clone)] +pub struct Docker { + use_buildkit: bool, + common_envs: Vec<(String, String)>, +} + +impl Docker { + pub fn new_with_options(enable_buildkit: bool, socket_location: Option) -> Result { + let mut docker = Docker { + use_buildkit: enable_buildkit, + common_envs: vec![( + "DOCKER_BUILDKIT".to_string(), + if enable_buildkit { + "1".to_string() + } else { + "0".to_string() + }, + )], + }; + + // Override DOCKER_HOST if we use a TCP socket + if let Some(socket_location) = socket_location { + docker + .common_envs + .push(("DOCKER_HOST".to_string(), socket_location.to_string())) + } + + // If we don't use buildkit nothing more to do + if !docker.use_buildkit { + return Ok(docker); + } + + // First check that the buildx plugin is correctly installed + let args = vec!["buildx", "version"]; + let buildx_cmd_exist = docker_exec( + &args, + &docker.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); + if buildx_cmd_exist.is_err() { + return Err(DockerError::InvalidConfig( + "Docker buildx plugin for buildkit is not correctly installed".to_string(), + )); + } + + // 
In order to be able to use --cache-from --cache-to for buildkit, + // we need to create our specific builder, which is not the default one (aka: the docker one) + let args = vec![ + "buildx", + "create", + "--name", + "qovery-engine", + "--driver-opt", + "network=host", + "--bootstrap", + "--use", + ]; + let _ = docker_exec( + &args, + &docker.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); + + Ok(docker) + } + + pub fn new(socket_location: Option) -> Result { + Self::new_with_options(true, socket_location) + } + + fn get_all_envs<'a>(&'a self, envs: &'a [(&'a str, &'a str)]) -> Vec<(&'a str, &'a str)> { + let mut all_envs: Vec<(&str, &str)> = self.common_envs.iter().map(|(k, v)| (k.as_str(), v.as_str())).collect(); + all_envs.append(&mut envs.to_vec()); + + all_envs + } + + pub fn login(&self, registry: &Url) -> Result<(), DockerError> { + info!("Docker login {} as user {}", registry, registry.username()); + + let _lock = LOGIN_LOCK.lock().unwrap(); + let password = urlencoding::decode(registry.password().unwrap_or_default()) + .unwrap_or_default() + .to_string(); + let args = vec![ + "login", + registry.host_str().unwrap_or_default(), + "-u", + registry.username(), + "-p", + &password, + ]; + + docker_exec( + &args, + &self.get_all_envs(&[]), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), + &CommandKiller::never(), + )?; + + Ok(()) + } + + pub fn does_image_exist_locally(&self, image: &ContainerImage) -> Result { + info!("Docker check locally image exist {:?}", image); + + let ret = docker_exec( + &["image", "inspect", &image.image_name()], + &self.get_all_envs(&[]), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), + &CommandKiller::never(), + ); + + Ok(matches!(ret, Ok(_))) + } + + // Warning: this command is slow > 10 sec + pub fn does_image_exist_remotely(&self, image: &ContainerImage) -> Result { + info!("Docker check remotely image exist {:?}", image); + + let ret = docker_exec( + 
&["manifest", "inspect", &image.image_name()], + &self.get_all_envs(&[]), + &mut |line| info!("{}", line), + &mut |line| warn!("{}", line), + &CommandKiller::never(), + ); + + match ret { + Ok(_) => Ok(true), + Err(DockerError::ExitStatusError(_)) => Ok(false), + Err(err) => Err(err), + } + } + + pub fn pull( + &self, + image: &ContainerImage, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, + should_abort: &CommandKiller, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker pull {:?}", image); + + docker_exec( + &["pull", &image.image_name()], + &self.get_all_envs(&[]), + stdout_output, + stderr_output, + should_abort, + ) + } + + pub fn build( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: &ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, + should_abort: &CommandKiller, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + // if there is no tags, nothing to build + if image_to_build.tags.is_empty() { + return Ok(()); + } + + // Do some checks + if !dockerfile.is_file() { + return Err(DockerError::InvalidConfig(format!( + "provided dockerfile `{:?}` is not a valid file", + dockerfile + ))); + } + + if !context.is_dir() { + return Err(DockerError::InvalidConfig(format!( + "provided docker build context `{:?}` is not a valid directory", + context + ))); + } + + if self.use_buildkit { + self.build_with_buildkit( + dockerfile, + context, + image_to_build, + build_args, + cache, + push_after_build, + stdout_output, + stderr_output, + should_abort, + ) + } else { + self.build_with_docker( + dockerfile, + context, + image_to_build, + build_args, + cache, + push_after_build, + stdout_output, + stderr_output, + should_abort, + ) + } + } + + fn build_with_docker( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: 
&ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, + should_abort: &CommandKiller, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker build {:?}", image_to_build.image_name()); + + // Best effort to pull the cache, if it does not exist that's ok too + let _ = self.pull(cache, stdout_output, stderr_output, should_abort); + + let mut args_string: Vec = vec![ + "build".to_string(), + "--network".to_string(), + "host".to_string(), + "-f".to_string(), + dockerfile.to_str().unwrap_or_default().to_string(), + ]; + + for image_name in image_to_build.image_names() { + args_string.push("--tag".to_string()); + args_string.push(image_name) + } + + for img_cache_name in cache.image_names() { + args_string.push("--tag".to_string()); + args_string.push(img_cache_name) + } + + for (k, v) in build_args { + args_string.push("--build-arg".to_string()); + args_string.push(format!("{}={}", k, v)); + } + + args_string.push(context.to_str().unwrap_or_default().to_string()); + + let _ = docker_exec( + &args_string.iter().map(|x| x.as_str()).collect::>(), + &self.get_all_envs(&[]), + stdout_output, + stderr_output, + should_abort, + )?; + + if push_after_build { + let _ = self.push(image_to_build, stdout_output, stderr_output, should_abort)?; + } + + Ok(()) + } + + fn build_with_buildkit( + &self, + dockerfile: &Path, + context: &Path, + image_to_build: &ContainerImage, + build_args: &[(&str, &str)], + cache: &ContainerImage, + push_after_build: bool, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, + should_abort: &CommandKiller, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker buildkit build {:?}", image_to_build.image_name()); + + let mut args_string: Vec = vec![ + "buildx".to_string(), + "build".to_string(), + "--progress=plain".to_string(), + 
"--network=host".to_string(), + if push_after_build { + "--output=type=registry".to_string() // tell buildkit to push image to registry + } else { + "--output=type=docker".to_string() // tell buildkit to load the image into docker after build + }, + "--cache-from".to_string(), + format!("type=registry,ref={}", cache.image_name()), + // Disabled for now, because private ECR does not support it ... + // https://github.com/aws/containers-roadmap/issues/876 + // "--cache-to".to_string(), + // format!("type=registry,ref={}", cache.image_name()), + "-f".to_string(), + dockerfile.to_str().unwrap_or_default().to_string(), + ]; + + for image_name in image_to_build.image_names() { + args_string.push("--tag".to_string()); + args_string.push(image_name.to_string()) + } + + for (k, v) in build_args { + args_string.push("--build-arg".to_string()); + args_string.push(format!("{}={}", k, v)); + } + + args_string.push(context.to_str().unwrap_or_default().to_string()); + + docker_exec( + &args_string.iter().map(|x| x.as_str()).collect::>(), + &self.get_all_envs(&[]), + stdout_output, + stderr_output, + should_abort, + ) + } + + pub fn push( + &self, + image: &ContainerImage, + stdout_output: &mut Stdout, + stderr_output: &mut Stderr, + should_abort: &CommandKiller, + ) -> Result<(), DockerError> + where + Stdout: FnMut(String), + Stderr: FnMut(String), + { + info!("Docker push {:?}", image); + let image_names = image.image_names(); + let mut args = vec!["push"]; + args.extend(image_names.iter().map(|x| x.as_str())); + + docker_exec(&args, &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort) + } + + pub fn prune_images(&self) -> Result<(), DockerError> { + info!("Docker prune images"); + + let all_prunes_commands = vec![ + vec!["container", "prune", "-f"], + vec!["image", "prune", "-a", "-f"], + vec!["builder", "prune", "-a", "-f"], + vec!["volume", "prune", "-f"], + vec!["buildx", "prune", "-a", "-f"], + ]; + + let mut errored_commands = vec![]; + for prune in 
all_prunes_commands { + let ret = docker_exec( + &prune, + &self.get_all_envs(&[]), + &mut |_| {}, + &mut |_| {}, + &CommandKiller::never(), + ); + if let Err(e) = ret { + errored_commands.push(e); + } + } + + if !errored_commands.is_empty() { + return Err(errored_commands.remove(0)); + } + + Ok(()) + } +} + +fn docker_exec( + args: &[&str], + envs: &[(&str, &str)], + stdout_output: &mut F, + stderr_output: &mut X, + cmd_killer: &CommandKiller, +) -> Result<(), DockerError> +where + F: FnMut(String), + X: FnMut(String), +{ + let mut cmd = QoveryCommand::new("docker", args, envs); + let ret = cmd.exec_with_abort(stdout_output, stderr_output, cmd_killer); + + match ret { + Ok(_) => Ok(()), + Err(CommandError::TimeoutError(msg)) => Err(DockerError::Timeout(msg)), + Err(CommandError::Killed(msg)) => Err(DockerError::Aborted(msg)), + Err(CommandError::ExitStatusError(err)) => Err(DockerError::ExitStatusError(err)), + Err(CommandError::ExecutionError(err)) => Err(DockerError::ExecutionError(err)), + } +} + +// start a local registry to run this test +// docker run --rm -ti -p 5000:5000 --name registry registry:2 +#[cfg(feature = "test-with-docker")] +#[cfg(test)] +mod tests { + use crate::cmd::command::CommandKiller; + use crate::cmd::docker::{ContainerImage, Docker, DockerError}; + use std::path::Path; + use std::time::Duration; + use url::Url; + + fn private_registry_url() -> Url { + Url::parse("http://localhost:5000").unwrap() + } + + #[test] + fn test_pull() { + let docker = Docker::new(None).unwrap(); + + // Invalid image should fails + let image = ContainerImage { + registry: Url::parse("https://docker.io").unwrap(), + name: "alpine".to_string(), + tags: vec!["666".to_string()], + }; + let ret = docker.pull( + &image, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + assert!(matches!(ret, Err(_))); + + // Valid image should be ok + let image = ContainerImage { + registry: Url::parse("https://docker.io").unwrap(), 
+ name: "alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + + let ret = docker.pull( + &image, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + assert!(matches!(ret, Ok(_))); + + // Should timeout + let ret = docker.pull( + &image, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::from_timeout(Duration::from_secs(1)), + ); + assert!(matches!(ret, Err(DockerError::Timeout(_)))); + } + + #[test] + fn test_docker_build() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(false, None).unwrap(); + let image_to_build = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + let ret = docker.build_with_docker( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &[], + &image_cache, + false, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + + assert!(matches!(ret, Ok(_))); + + // It should fails with buildkit dockerfile + let ret = docker.build_with_docker( + Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &[], + &image_cache, + false, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + + assert!(matches!(ret, Err(_))); + } + + #[test] + fn test_buildkit_build() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(true, None).unwrap(); + let image_to_build = ContainerImage { + registry: 
private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + // It should work + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &[], + &image_cache, + false, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + + assert!(matches!(ret, Ok(_))); + + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile.buildkit"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &[], + &image_cache, + false, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + + assert!(matches!(ret, Ok(_))); + } + + #[test] + fn test_push() { + // start a local registry to run this test + // docker run --rm -d -p 5000:5000 --name registry registry:2 + let docker = Docker::new_with_options(true, None).unwrap(); + let image_to_build = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["3.15".to_string()], + }; + let image_cache = ContainerImage { + registry: private_registry_url(), + name: "erebe/alpine".to_string(), + tags: vec!["cache".to_string()], + }; + + // It should work + let ret = docker.build_with_buildkit( + Path::new("tests/docker/multi_stage_simple/Dockerfile"), + Path::new("tests/docker/multi_stage_simple/"), + &image_to_build, + &[], + &image_cache, + false, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + assert!(matches!(ret, Ok(_))); + + let ret = docker.does_image_exist_locally(&image_to_build); + assert!(matches!(ret, Ok(true))); + + let ret = docker.does_image_exist_remotely(&image_to_build); + 
assert!(matches!(ret, Ok(false))); + + let ret = docker.push( + &image_to_build, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + assert!(matches!(ret, Ok(_))); + + let ret = docker.pull( + &image_to_build, + &mut |msg| println!("{}", msg), + &mut |msg| eprintln!("{}", msg), + &CommandKiller::never(), + ); + assert!(matches!(ret, Ok(_))); + } +} diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index f9a42854..16d319c2 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -10,7 +10,6 @@ use crate::cmd::helm::HelmError::{CannotRollback, CmdError, InvalidKubeConfig, R use crate::cmd::structs::{HelmChart, HelmListItem}; use crate::errors::{CommandError, EngineError}; use crate::events::EventDetails; -use chrono::Duration; use semver::Version; use serde_derive::Deserialize; use std::fs::File; @@ -133,8 +132,8 @@ impl Helm { match helm_exec_with_output( &args, &self.get_all_envs(envs), - |line| stdout.push_str(&line), - |line| stderr.push_str(&line), + &mut |line| stdout.push_str(&line), + &mut |line| stderr.push_str(&line), ) { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { @@ -173,7 +172,7 @@ impl Helm { ]; let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), |_| {}, |line| stderr.push_str(&line)) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -206,7 +205,7 @@ impl Helm { ]; let mut stderr = String::new(); - match helm_exec_with_output(&args, &self.get_all_envs(envs), |_| {}, |line| stderr.push_str(&line)) { + match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message()); let error = CommandError::new(stderr, err.message_safe()); @@ -260,8 
+259,8 @@ impl Helm { if let Err(cmd_error) = helm_exec_with_output( &helm_args, &self.get_all_envs(envs), - |line| output_string.push(line), - |line| error!("{}", line), + &mut |line| output_string.push(line), + &mut |line| error!("{}", line), ) { return Err(HelmError::CmdError("none".to_string(), LIST, cmd_error)); } @@ -285,10 +284,7 @@ impl Helm { Err(HelmError::CmdError( "none".to_string(), LIST, - CommandError::new( - format!("{}, error: {}", message_safe, e), - Some(message_safe.to_string()), - ), + CommandError::new(format!("{}, error: {}", message_safe, e), Some(message_safe.to_string())), )) } } @@ -362,10 +358,10 @@ impl Helm { let helm_ret = helm_exec_with_output( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(envs), - |line| { + &mut |line| { debug!("{}", line); }, - |line| { + &mut |line| { stderr_msg.push_str(&line); warn!("chart {}: {}", chart.name, line); }, @@ -470,10 +466,10 @@ impl Helm { let helm_ret = helm_exec_with_output( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(envs), - |line| { + &mut |line| { info!("{}", line); }, - |line| { + &mut |line| { warn!("chart {}: {}", chart.name, line); error_message.push(line); }, @@ -521,7 +517,7 @@ impl Helm { self.get_chart_version(chart.name.clone(), Some(chart.get_namespace_string().as_str()), envs)? { if installed_version.le(breaking_version) { - self.uninstall(&chart, envs)?; + self.uninstall(chart, envs)?; } } } @@ -530,21 +526,21 @@ impl Helm { } } -fn helm_exec_with_output( +fn helm_exec_with_output( args: &[&str], envs: &[(&str, &str)], - stdout_output: F, - stderr_output: X, + stdout_output: &mut STDOUT, + stderr_output: &mut STDERR, ) -> Result<(), CommandError> where - F: FnMut(String), - X: FnMut(String), + STDOUT: FnMut(String), + STDERR: FnMut(String), { // Note: Helm CLI use spf13/cobra lib for the CLI; One function is mainly used to return an error if a command failed. 
// Helm returns an error each time a command does not succeed as they want. Which leads to handling error with status code 1 // It means that the command successfully ran, but it didn't terminate as expected let mut cmd = QoveryCommand::new("helm", args, envs); - match cmd.exec_with_timeout(Duration::max_value(), stdout_output, stderr_output) { + match cmd.exec_with_output(stdout_output, stderr_output) { Err(err) => Err(CommandError::new(format!("{:?}", err), None)), _ => Ok(()), } @@ -576,7 +572,7 @@ mod tests { impl HelmTestCtx { fn cleanup(&self) { - let ret = self.helm.uninstall(&self.chart, &vec![]); + let ret = self.helm.uninstall(&self.chart, &[]); assert!(ret.is_ok()) } @@ -592,7 +588,7 @@ mod tests { ); let mut kube_config = dirs::home_dir().unwrap(); kube_config.push(".kube/config"); - let helm = Helm::new(kube_config.to_str().unwrap(), &vec![]).unwrap(); + let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); let cleanup = HelmTestCtx { helm, chart }; cleanup.cleanup(); @@ -609,14 +605,14 @@ mod tests { #[test] fn check_version() { let mut output = String::new(); - let _ = helm_exec_with_output(&vec!["version"], &vec![], |line| output.push_str(&line), |_line| {}); + let _ = helm_exec_with_output(&["version"], &[], &mut |line| output.push_str(&line), &mut |_line| {}); assert!(output.contains("Version:\"v3.7.2\"")); } #[test] fn test_release_exist() { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-release-exist"); - let ret = helm.check_release_exist(chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)) } @@ -630,19 +626,19 @@ mod tests { chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); // no existing namespace should return an empty array - let ret = helm.list_release(Some("tsdfsfsdf"), &vec![]); + let ret = helm.list_release(Some("tsdfsfsdf"), &[]); assert!(matches!(ret, Ok(vec) if 
vec.is_empty())); // install something - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // We should have at least one release in all the release - let ret = helm.list_release(None, &vec![]); + let ret = helm.list_release(None, &[]); assert!(matches!(ret, Ok(vec) if !vec.is_empty())); // We should have at least one release in all the release - let ret = helm.list_release(Some(&chart.get_namespace_string()), &vec![]); + let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); assert!(matches!(ret, Ok(vec) if vec.len() == 1)); // Install a second stuff @@ -651,10 +647,10 @@ mod tests { ref mut chart, } = HelmTestCtx::new("test-list-release-2"); chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); - let ret = helm.list_release(Some(&chart.get_namespace_string()), &vec![]); + let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); assert!(matches!(ret, Ok(vec) if vec.len() == 2)); } @@ -662,7 +658,7 @@ mod tests { fn test_upgrade_diff() { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-diff"); - let ret = helm.upgrade_diff(&chart, &vec![]); + let ret = helm.upgrade_diff(chart, &[]); assert!(matches!(ret, Ok(()))); } @@ -671,23 +667,23 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-rollback"); // check release does not exist yet - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // First revision cannot be rollback - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, 
Err(HelmError::CannotRollback(_)))); // 2nd upgrade - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Rollback should be ok now - let ret = helm.rollback(&chart, &vec![]); + let ret = helm.rollback(chart, &[]); assert!(matches!(ret, Ok(()))); } @@ -696,15 +692,15 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(_))); } @@ -717,15 +713,15 @@ mod tests { chart.timeout_in_seconds = 1; // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Err(HelmError::Timeout(_, _, _)))); // Release should not exist if it fails - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); } @@ -735,7 +731,7 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-with-lock-install"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // Spawn our task 
killer @@ -746,26 +742,26 @@ mod tests { move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); - let mut cmd = QoveryCommand::new("pkill", &vec!["-9", "-f", &format!("helm.*{}", chart_name)], &vec![]); + let mut cmd = QoveryCommand::new("pkill", &["-9", "-f", &format!("helm.*{}", chart_name)], &[]); let _ = cmd.exec(); } }); // install it barrier.wait(); - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if release.is_locked())); // New installation should work even if a lock is present - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if !release.is_locked())); } @@ -778,11 +774,11 @@ mod tests { } = HelmTestCtx::new("test-upgrade-with-lock-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // First install - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Spawn our task killer @@ -793,7 +789,7 @@ mod tests { move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); - let mut cmd = QoveryCommand::new("pkill", &vec!["-9", "-f", &format!("helm.*{}", chart_name)], &vec![]); + let mut cmd = QoveryCommand::new("pkill", &["-9", "-f", &format!("helm.*{}", chart_name)], &[]); let _ = cmd.exec(); } }); @@ -803,19 +799,19 @@ mod tests { value: "6".to_string(), }]; barrier.wait(); - let ret = helm.upgrade(&chart, &vec![]); + let ret = 
helm.upgrade(chart, &[]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if release.is_locked() && release.version == 2)); // New installation should work even if a lock is present - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(release) if !release.is_locked() && release.version == 4)); } @@ -824,27 +820,27 @@ mod tests { let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-uninstall"); // check release does not exist yet - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); // deleting something that does not exist should not be an issue - let ret = helm.uninstall(&chart, &vec![]); + let ret = helm.uninstall(chart, &[]); assert!(matches!(ret, Ok(()))); // install it - let ret = helm.upgrade(&chart, &vec![]); + let ret = helm.upgrade(chart, &[]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Ok(_))); // Delete it - let ret = helm.uninstall(&chart, &vec![]); + let ret = helm.uninstall(chart, &[]); assert!(matches!(ret, Ok(()))); // check release does not exist anymore - let ret = helm.check_release_exist(&chart, &vec![]); + let ret = helm.check_release_exist(chart, &[]); assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); } @@ -854,8 +850,8 @@ mod tests { ref helm, ref mut chart, } = HelmTestCtx::new("test-version-release"); - let _ = helm.upgrade(&chart, &vec![]); - 
let releases = helm.list_release(Some(&chart.get_namespace_string()), &vec![]).unwrap(); + let _ = helm.upgrade(chart, &[]); + let releases = helm.list_release(Some(&chart.get_namespace_string()), &[]).unwrap(); assert_eq!(releases[0].clone().version.unwrap(), Version::new(0, 1, 0)) } } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 9aaf9dff..369488f6 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -1,6 +1,5 @@ use std::path::Path; -use chrono::Duration; use retry::delay::Fibonacci; use retry::OperationResult; use serde::de::DeserializeOwned; @@ -32,8 +31,8 @@ pub enum PodCondition { pub fn kubectl_exec_with_output( args: Vec<&str>, envs: Vec<(&str, &str)>, - stdout_output: F, - stderr_output: X, + stdout_output: &mut F, + stderr_output: &mut X, ) -> Result<(), CommandError> where F: FnMut(String), @@ -41,7 +40,7 @@ where { let mut cmd = QoveryCommand::new("kubectl", &args, &envs); - if let Err(err) = cmd.exec_with_timeout(Duration::max_value(), stdout_output, stderr_output) { + if let Err(err) = cmd.exec_with_output(stdout_output, stderr_output) { let args_string = args.join(" "); let msg = format!("Error on command: kubectl {}. 
{:?}", args_string, &err); error!("{}", &msg); @@ -82,8 +81,8 @@ where "-o=custom-columns=:.status.containerStatuses..restartCount", ], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; let output_string: String = output_vec.join(""); @@ -106,12 +105,9 @@ where let mut output_vec: Vec = Vec::with_capacity(20); let mut err_output_vec: Vec = Vec::with_capacity(20); let cmd_args = vec!["get", "svc", "-n", namespace, service_name, "-o", "json"]; - let _ = kubectl_exec_with_output( - cmd_args.clone(), - envs.clone(), - |line| output_vec.push(line), - |line| err_output_vec.push(line), - )?; + let _ = kubectl_exec_with_output(cmd_args.clone(), envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + err_output_vec.push(line) + })?; let output_string: String = output_vec.join("\n"); let err_output_string: String = output_vec.join("\n"); @@ -124,7 +120,7 @@ where cmd_args.into_iter().map(|a| a.to_string()).collect(), envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect(), Some(output_string.to_string()), - Some(err_output_string.to_string()), + Some(err_output_string), )), } } @@ -166,13 +162,7 @@ where return Ok(None); } - Ok(Some( - result - .metadata - .annotations - .kubernetes_digitalocean_com_load_balancer_id - .clone(), - )) + Ok(Some(result.metadata.annotations.kubernetes_digitalocean_com_load_balancer_id)) } Err(e) => Err(e), } @@ -197,9 +187,7 @@ where return Ok(None); } - Ok(Some( - result.status.load_balancer.ingress.first().unwrap().hostname.clone(), - )) + Ok(Some(result.status.load_balancer.ingress.first().unwrap().hostname.clone())) } pub fn kubectl_exec_is_pod_ready_with_retry

( @@ -368,8 +356,8 @@ where let result = kubectl_exec_with_output( vec!["get", "namespace", namespace], _envs, - |out| info!("{:?}", out), - |out| warn!("{:?}", out), + &mut |out| info!("{:?}", out), + &mut |out| warn!("{:?}", out), ); result.is_ok() @@ -398,14 +386,14 @@ where let _ = kubectl_exec_with_output( vec!["create", "namespace", namespace], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; } // additional labels - if labels.is_some() { - match kubectl_add_labels_to_namespace(kubernetes_config, namespace, labels.unwrap(), envs) { + if let Some(..) = labels { + match kubectl_add_labels_to_namespace(kubernetes_config, namespace, labels.unwrap_or_default(), envs) { Ok(_) => {} Err(e) => return Err(e), } @@ -450,7 +438,9 @@ where _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); _envs.extend(envs.clone()); - let _ = kubectl_exec_with_output(command_args, _envs, |line| info!("{}", line), |line| error!("{}", line))?; + let _ = kubectl_exec_with_output(command_args, _envs, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + })?; Ok(()) } @@ -459,7 +449,7 @@ where pub fn does_contain_terraform_tfstate

( kubernetes_config: P, namespace: &str, - envs: &Vec<(&str, &str)>, + envs: &[(&str, &str)], ) -> Result where P: AsRef, @@ -526,7 +516,7 @@ pub fn kubectl_exec_delete_namespace

( where P: AsRef, { - if does_contain_terraform_tfstate(&kubernetes_config, &namespace, &envs)? { + if does_contain_terraform_tfstate(&kubernetes_config, namespace, &envs)? { return Err(CommandError::new_from_safe_message( "Namespace contains terraform tfstates in secret, can't delete it !".to_string(), )); @@ -539,8 +529,8 @@ where let _ = kubectl_exec_with_output( vec!["delete", "namespace", namespace], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -561,8 +551,8 @@ where let _ = kubectl_exec_with_output( vec!["delete", "crd", crd_name], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -584,8 +574,8 @@ where let _ = kubectl_exec_with_output( vec!["-n", namespace, "delete", "secret", secret], _envs, - |line| info!("{}", line), - |line| error!("{}", line), + &mut |line| info!("{}", line), + &mut |line| error!("{}", line), )?; Ok(()) @@ -608,8 +598,8 @@ where let _ = kubectl_exec_with_output( vec!["logs", "--tail", "1000", "-n", namespace, "-l", selector], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; Ok(output_vec) @@ -632,8 +622,8 @@ where let _ = kubectl_exec_with_output( vec!["describe", "pod", "-n", namespace, "-l", selector], _envs, - |line| output_vec.push(line), - |line| error!("{}", line), + &mut |line| output_vec.push(line), + &mut |line| error!("{}", line), )?; Ok(output_vec.join("\n")) @@ -674,21 +664,18 @@ pub fn kubectl_exec_rollout_restart_deployment

( kubernetes_config: P, name: &str, namespace: &str, - envs: &Vec<(&str, &str)>, + envs: &[(&str, &str)], ) -> Result<(), CommandError> where P: AsRef, { - let mut environment_variables: Vec<(&str, &str)> = envs.clone(); + let mut environment_variables: Vec<(&str, &str)> = envs.to_owned(); environment_variables.push(("KUBECONFIG", kubernetes_config.as_ref().to_str().unwrap())); let args = vec!["-n", namespace, "rollout", "restart", "deployment", name]; - kubectl_exec_with_output( - args, - environment_variables.clone(), - |line| info!("{}", line), - |line| error!("{}", line), - ) + kubectl_exec_with_output(args, environment_variables, &mut |line| info!("{}", line), &mut |line| { + error!("{}", line) + }) } pub fn kubectl_exec_get_node

( @@ -788,7 +775,7 @@ where P: AsRef, { kubectl_exec::( - vec!["get", "configmap", "-o", "json", "-n", namespace, &name], + vec!["get", "configmap", "-o", "json", "-n", namespace, name], kubernetes_config, envs, ) @@ -829,7 +816,7 @@ where let args = vec!["get", "event", arg_namespace.as_str(), "--sort-by='.lastTimestamp'"]; let mut result_ok = String::new(); - match kubectl_exec_with_output(args, environment_variables, |line| result_ok = line, |_| {}) { + match kubectl_exec_with_output(args, environment_variables, &mut |line| result_ok = line, &mut |_| {}) { Ok(()) => Ok(result_ok), Err(err) => Err(err), } @@ -844,7 +831,7 @@ where P: AsRef, { let result = kubectl_exec::>( - vec!["delete", &object.to_string(), "--all-namespaces", "--all"], + vec!["delete", object, "--all-namespaces", "--all"], kubernetes_config, envs, ); @@ -856,7 +843,7 @@ where if lower_case_message.contains("no resources found") || lower_case_message.ends_with(" deleted") { return Ok(()); } - return Err(e); + Err(e) } } } @@ -929,8 +916,8 @@ where &replicas_count.to_string(), ], _envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ) } @@ -971,21 +958,21 @@ where "scale", "--replicas", &replicas_count.to_string(), - &kind_formatted, + kind_formatted, "--selector", selector, ], _envs.clone(), - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; // deleting pdb in order to be able to upgrade kubernetes version kubectl_exec_with_output( vec!["-n", namespace, "delete", "pdb", "--selector", selector], _envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, )?; let condition = match replicas_count { @@ -1029,8 +1016,8 @@ where "--timeout=300s", ], complete_envs, - |out| info!("{:?}", out), - |out| warn!("{:?}", out), + &mut |out| info!("{:?}", out), + &mut |out| warn!("{:?}", out), ) } @@ -1038,22 +1025,14 @@ pub fn kubectl_get_pvc

(kubernetes_config: P, namespace: &str, envs: Vec<(&str where P: AsRef, { - kubectl_exec::( - vec!["get", "pvc", "-o", "json", "-n", namespace], - kubernetes_config, - envs, - ) + kubectl_exec::(vec!["get", "pvc", "-o", "json", "-n", namespace], kubernetes_config, envs) } pub fn kubectl_get_svc

(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result where P: AsRef, { - kubectl_exec::( - vec!["get", "svc", "-o", "json", "-n", namespace], - kubernetes_config, - envs, - ) + kubectl_exec::(vec!["get", "svc", "-o", "json", "-n", namespace], kubernetes_config, envs) } /// kubectl_delete_crash_looping_pods: delete crash looping pods. @@ -1127,7 +1106,7 @@ where .container_statuses .as_ref() .expect("Cannot get container statuses") - .into_iter() + .iter() .any(|e| { e.state.waiting.as_ref().is_some() && e.state.waiting.as_ref().expect("cannot get container state").reason == KubernetesPodStatusReason::CrashLoopBackOff // check 1 @@ -1175,8 +1154,8 @@ where pod_to_be_deleted.metadata.namespace.as_str(), ], complete_envs, - |_| {}, - |_| {}, + &mut |_| {}, + &mut |_| {}, ) { Ok(_) => Ok(pod_to_be_deleted), Err(e) => Err(CommandError::new(e.message(), None)), @@ -1193,12 +1172,9 @@ where _envs.extend(envs); let mut output_vec: Vec = Vec::with_capacity(50); - let _ = kubectl_exec_with_output( - args.clone(), - _envs.clone(), - |line| output_vec.push(line), - |line| error!("{}", line), - )?; + let _ = kubectl_exec_with_output(args.clone(), _envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + error!("{}", line) + })?; let output_string: String = output_vec.join(""); diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index f486a56a..153aab46 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -1,4 +1,5 @@ pub mod command; +pub mod docker; pub mod helm; pub mod kubectl; pub mod structs; diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index d3cab515..8662d99e 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -9,7 +9,7 @@ use rand::Rng; use retry::Error::Operation; use std::{env, fs, thread, time}; -fn manage_common_issues(terraform_provider_lock: &String, err: &CommandError) -> Result<(), CommandError> { +fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Result<(), CommandError> { // Error: 
Failed to install provider from shared cache // in order to avoid lock errors on parallel run, let's sleep a bit // https://github.com/hashicorp/terraform/issues/28041 @@ -70,8 +70,8 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> { match result { Ok(_) => Ok(()), - Err(Operation { error, .. }) => return Err(error), - Err(retry::Error::Internal(e)) => return Err(CommandError::new(e, None)), + Err(Operation { error, .. }) => Err(error), + Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), } } @@ -192,19 +192,15 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co let mut stdout = Vec::new(); let mut stderr = Vec::new(); - let mut cmd = QoveryCommand::new( - "terraform", - &args, - &vec![(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())], - ); + let mut cmd = QoveryCommand::new("terraform", &args, &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())]); cmd.set_current_dir(root_dir); let result = cmd.exec_with_output( - |line| { + &mut |line| { info!("{}", line); stdout.push(line); }, - |line| { + &mut |line| { error!("{}", line); stderr.push(line); }, @@ -249,7 +245,7 @@ in the dependency lock file "#; let could_not_load_plugin_error = CommandError::new_from_safe_message(could_not_load_plugin.to_string()); - assert!(manage_common_issues(&"/tmp/do_not_exists".to_string(), &could_not_load_plugin_error).is_ok()); + assert!(manage_common_issues("/tmp/do_not_exists", &could_not_load_plugin_error).is_ok()); } #[test] diff --git a/src/container_registry/docker.rs b/src/container_registry/docker.rs deleted file mode 100644 index 5cd30146..00000000 --- a/src/container_registry/docker.rs +++ /dev/null @@ -1,299 +0,0 @@ -use crate::build_platform::Image; -use crate::cmd; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::Kind; -use crate::error::{SimpleError, SimpleErrorKind}; -use chrono::Duration; -use retry::delay::Fibonacci; -use retry::Error::Operation; -use 
retry::OperationResult; - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct DockerImageManifest { - pub schema_version: i64, - pub media_type: String, - pub config: Config, - pub layers: Vec, -} - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Config { - pub media_type: String, - pub size: i64, - pub digest: String, -} - -#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Layer { - pub media_type: String, - pub size: i64, - pub digest: String, -} - -pub fn docker_manifest_inspect( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - image_name: String, - image_tag: String, - registry_url: String, -) -> Option { - let image_with_tag = format!("{}:{}", image_name, image_tag); - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - // Note: `docker manifest inspect` is still experimental for the time being: - // https://docs.docker.com/engine/reference/commandline/manifest_inspect/ - let mut envs = docker_envs.clone(); - envs.push(("DOCKER_CLI_EXPERIMENTAL", "enabled")); - - let binary = "docker"; - let image_full_url = format!("{}/{}", registry_url.as_str(), &image_with_tag); - let args = vec!["manifest", "inspect", image_full_url.as_str()]; - let mut raw_output: Vec = vec![]; - - let mut cmd = QoveryCommand::new("docker", &args, &envs); - return match cmd.exec_with_timeout(Duration::minutes(1), |line| raw_output.push(line), |_| {}) { - Ok(_) => { - let joined = raw_output.join(""); - match serde_json::from_str(&joined) { - Ok(extracted_manifest) => Some(extracted_manifest), - Err(e) => { - error!( - "error while 
trying to deserialize manifest image manifest for image {} in {} ({}): {:?}", - image_with_tag, registry_provider, registry_url, e, - ); - None - } - } - } - Err(e) => { - error!( - "error while trying to inspect image manifest for image {} in {} ({}), command `{}`: {:?}", - image_with_tag, - registry_provider, - registry_url, - cmd::command::command_to_string(binary, &args, &envs), - e, - ); - None - } - }; -} - -pub fn docker_login( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - registry_login: String, - registry_pass: String, - registry_url: String, -) -> Result<(), SimpleError> { - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let binary = "docker"; - let args = vec![ - "login", - registry_url.as_str(), - "-u", - registry_login.as_str(), - "-p", - registry_pass.as_str(), - ]; - - let mut cmd = QoveryCommand::new(binary, &args, &docker_envs); - match cmd.exec() { - Ok(_) => Ok(()), - Err(e) => { - let error_message = format!( - "error while trying to login to registry {} {}, command `{}`: {:?}", - registry_provider, - registry_url, - cmd::command::command_to_string(binary, &args, &docker_envs), - e, - ); - error!("{}", error_message); - - Err(SimpleError::new(SimpleErrorKind::Other, Some(error_message))) - } - } -} - -pub fn docker_tag_and_push_image( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - image: &Image, - dest: String, - dest_latest_tag: String, -) -> Result<(), SimpleError> { - let image_with_tag = image.name_with_tag(); - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let mut cmd = QoveryCommand::new("docker", &vec!["tag", &image_with_tag, dest.as_str()], &docker_envs); - match 
retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - info!("failed to tag image {}, retrying...", image_with_tag); - OperationResult::Retry(e) - } - }) { - Err(Operation { error, .. }) => { - return Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!("failed to tag image {}: {:?}", image_with_tag, error)), - )) - } - _ => {} - } - - let mut cmd = QoveryCommand::new("docker", &vec!["push", dest.as_str()], &docker_envs); - let _ = match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - warn!( - "failed to push image {} on {}, {:?} retrying...", - image_with_tag, registry_provider, e - ); - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "unknown error while trying to push image {} to {}. {:?}", - image_with_tag, registry_provider, e - )), - )), - _ => { - info!("image {} has successfully been pushed", image_with_tag); - Ok(()) - } - }; - - let image_with_latest_tag = image.name_with_latest_tag(); - let mut cmd = QoveryCommand::new( - "docker", - &vec!["tag", &image_with_latest_tag, dest_latest_tag.as_str()], - &docker_envs, - ); - match retry::retry(Fibonacci::from_millis(3000).take(5), || match cmd.exec() { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - info!("failed to tag image {}, retrying...", image_with_latest_tag); - OperationResult::Retry(e) - } - }) { - Err(Operation { error, .. 
}) => { - return Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!("failed to tag image {}: {:?}", image_with_latest_tag, error)), - )) - } - _ => {} - } - - let mut cmd = QoveryCommand::new("docker", &vec!["push", dest_latest_tag.as_str()], &docker_envs); - match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - warn!( - "failed to push image {} on {}, {:?} retrying...", - image_with_latest_tag, registry_provider, e - ); - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "unknown error while trying to push image {} to {}. {:?}", - image_with_latest_tag, registry_provider, e - )), - )), - _ => { - info!("image {} has successfully been pushed", image_with_latest_tag); - Ok(()) - } - } -} - -pub fn docker_pull_image( - container_registry_kind: Kind, - docker_envs: Vec<(&str, &str)>, - dest: String, -) -> Result<(), SimpleError> { - let registry_provider = match container_registry_kind { - Kind::DockerHub => "DockerHub", - Kind::Ecr => "AWS ECR", - Kind::Docr => "DigitalOcean Registry", - Kind::ScalewayCr => "Scaleway Registry", - }; - - let mut cmd = QoveryCommand::new("docker", &vec!["pull", dest.as_str()], &docker_envs); - match retry::retry(Fibonacci::from_millis(5000).take(5), || { - match cmd.exec_with_timeout( - Duration::minutes(10), - |line| info!("{}", line), - |line| error!("{}", line), - ) { - Ok(_) => OperationResult::Ok(()), - Err(e) => { - warn!( - "failed to pull image from {} registry {}, {:?} retrying...", - registry_provider, - dest.as_str(), - e, - ); - OperationResult::Retry(e) - } - } - }) { - Err(Operation { error, .. 
}) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))), - Err(e) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "unknown error while trying to pull image {} from {} registry. {:?}", - dest.as_str(), - registry_provider, - e, - )), - )), - _ => { - info!( - "image {} has successfully been pulled from {} registry", - dest.as_str(), - registry_provider, - ); - Ok(()) - } - } -} diff --git a/src/container_registry/docker_hub.rs b/src/container_registry/docker_hub.rs deleted file mode 100644 index db4e2131..00000000 --- a/src/container_registry/docker_hub.rs +++ /dev/null @@ -1,259 +0,0 @@ -extern crate reqwest; - -use reqwest::StatusCode; - -use crate::build_platform::Image; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult}; -use crate::error::EngineErrorCause; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; -use crate::models::{ - Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; - -pub struct DockerHub { - context: Context, - id: String, - name: String, - login: String, - password: String, - listeners: Listeners, -} - -impl DockerHub { - pub fn new(context: Context, id: &str, name: &str, login: &str, password: &str) -> Self { - DockerHub { - context, - id: id.to_string(), - name: name.to_string(), - login: login.to_string(), - password: password.to_string(), - listeners: vec![], - } - } - - pub fn exec_docker_login(&self) -> Result<(), EngineError> { - let envs = match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - }; - - let mut cmd = QoveryCommand::new( - "docker", - &vec!["login", "-u", self.login.as_str(), "-p", self.password.as_str()], - &envs, - ); - - match cmd.exec() { 
- Ok(_) => Ok(()), - Err(_) => Err(self.engine_error( - EngineErrorCause::User( - "Your DockerHub account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to DockerHub {}", self.name_with_id()), - )), - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), vec![], dest.clone()) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), - )), - } - } -} - -impl ToTransmitter for DockerHub { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } -} - -impl ContainerRegistry for DockerHub { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::DockerHub - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn is_valid(&self) -> Result<(), NewEngineError> { - Ok(()) - } - - fn on_create(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn does_image_exists(&self, image: &Image) -> bool { - use reqwest::blocking::Client; - let client = Client::new(); - let path = format!( - "https://index.docker.io/v1/repositories/{}/{}/tags", - &self.login, image.name - ); - let res = client - .get(path.as_str()) - .basic_auth(&self.login, Option::from(&self.password)) - .send(); - - // TODO (mzo) no check of existing tags as in others impl ? 
- match res { - Ok(out) => matches!(out.status(), StatusCode::OK), - Err(e) => { - error!("While trying to retrieve if DockerHub repository exist {:?}", e); - false - } - } - } - - fn pull(&self, image: &Image) -> Result { - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !self.does_image_exists(image) { - let info_message = format!( - "image {:?} does not exist in DockerHub {} repository", - image, - self.name() - ); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from DockerHub {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login()?; - - let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str()); - - // pull image - self.pull_image(dest, image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let _ = self.exec_docker_login()?; - - let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str()); - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {:?} found on DockerHub {} repository, container build is not required", - image, - self.name() - ); - - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - 
self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image {:?} does not exist on DockerHub {} repository, starting image upload", - image, - self.name() - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}/{}:latest", self.login.as_str(), image.name); - match docker_tag_and_push_image(self.kind(), vec![], &image, dest.clone(), dest_latest_tag) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PushResult { image }) - } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )), - } - } - - fn push_error(&self, _image: &Image) -> Result { - unimplemented!() - } -} - -impl Listen for DockerHub { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index eeabe76d..a90c1e73 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -5,21 +5,15 @@ use serde::{Deserialize, Serialize}; use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult}; -use crate::error::{cast_simple_error_to_engine_error, EngineErrorCause, SimpleError, SimpleErrorKind}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; -use crate::models::{ - Context, Listen, Listener, Listeners, 
ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::utilities; -use retry::delay::Fixed; -use retry::Error::Operation; -use retry::OperationResult; +use url::Url; const CR_API_PATH: &str = "https://api.digitalocean.com/v2/registry"; const CR_CLUSTER_API_PATH: &str = "https://api.digitalocean.com/v2/kubernetes/registry"; +const CR_REGISTRY_DOMAIN: &str = "registry.digitalocean.com"; // TODO : use --output json // see https://www.digitalocean.com/community/tutorials/how-to-use-doctl-the-official-digitalocean-command-line-client @@ -29,46 +23,56 @@ pub struct DOCR { pub name: String, pub api_key: String, pub id: String, + pub registry_info: ContainerRegistryInfo, pub listeners: Listeners, } impl DOCR { - pub fn new(context: Context, id: &str, name: &str, api_key: &str) -> Self { - DOCR { + pub fn new( + context: Context, + id: &str, + name: &str, + api_key: &str, + listener: Listener, + ) -> Result { + let registry_name = name.to_string(); + let registry_name2 = name.to_string(); + let mut registry = Url::parse(&format!("https://{}", CR_REGISTRY_DOMAIN)).unwrap(); + let _ = registry.set_username(api_key); + let _ = registry.set_password(Some(api_key)); + + let registry_info = ContainerRegistryInfo { + endpoint: registry, + registry_name: name.to_string(), + registry_docker_json_config: None, + get_image_name: Box::new(move |img_name| format!("{}/{}", registry_name, img_name)), + get_repository_name: Box::new(move |_| registry_name2.to_string()), + }; + + let cr = DOCR { context, - name: name.into(), + name: name.to_string(), api_key: api_key.into(), id: id.into(), - listeners: vec![], + listeners: vec![listener], + registry_info, + }; + + if cr.context.docker.login(&cr.registry_info.endpoint).is_err() { + return 
Err(ContainerRegistryError::InvalidCredentials); } + + Ok(cr) } - fn get_registry_name(&self, image: &Image) -> Result { - let registry_name = match image.registry_name.as_ref() { - // DOCR does not support upper cases - Some(registry_name) => registry_name.to_lowercase(), - None => cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - get_current_registry_name(self.api_key.as_str()), - )?, - }; - - Ok(registry_name) - } - - fn create_repository(&self, image: &Image) -> Result<(), EngineError> { - let registry_name = match image.registry_name.as_ref() { - // DOCR does not support upper cases - Some(registry_name) => registry_name.to_lowercase(), - None => self.name.clone(), - }; - + fn create_registry(&self, registry_name: &str) -> Result<(), ContainerRegistryError> { + // DOCR does not support upper cases + let registry_name = registry_name.to_lowercase(); let headers = utilities::get_header_with_bearer(&self.api_key); // subscription_tier_slug: https://www.digitalocean.com/products/container-registry/ // starter and basic tiers are too limited on repository creation let repo = DoApiCreateRepository { - name: registry_name.clone(), + name: registry_name.to_string(), subscription_tier_slug: "professional".to_string(), }; @@ -85,88 +89,42 @@ impl DOCR { StatusCode::OK => Ok(()), StatusCode::CREATED => Ok(()), status => { - warn!("status from DO registry API {}", status); - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "Bad status code : {} returned by the DO registry API for creating DO CR {}", + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( + "Bad status code: `{}` returned by the DO registry API for creating DOCR `{}`.", status, registry_name.as_str(), ), - )); + }); } }, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("failed to create DOCR repository {} : {:?}", 
registry_name.as_str(), e,), - )); + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( + "Failed to create DOCR repository `{}`, error: {}.", + registry_name.as_str(), + e, + ), + }); } } } Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("Unable to initialize DO Registry {} : {:?}", registry_name.as_str(), e,), - )); + return Err(ContainerRegistryError::CannotCreateRegistry { + registry_name: registry_name.to_string(), + raw_error_message: format!( + "Failed to create DOCR repository `{}`, error: {}.", + registry_name.as_str(), + e, + ), + }); } } } - fn push_image(&self, registry_name: String, dest: String, image: &Image) -> Result { - let dest_latest_tag = format!( - "registry.digitalocean.com/{}/{}:latest", - registry_name.as_str(), - image.name - ); - let _ = match docker_tag_and_push_image(self.kind(), vec![], &image, dest.clone(), dest_latest_tag) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )); - } - }; - - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - - let result = retry::retry(Fixed::from_millis(10000).take(12), || { - match self.does_image_exists(&image) { - true => OperationResult::Ok(&image), - false => { - warn!("image is not yet available on Digital Ocean Registry, retrying in a few seconds..."); - OperationResult::Retry(()) - } - } - }); - - let image_not_reachable = Err(self.engine_error( - EngineErrorCause::Internal, - "image has been pushed on Digital Ocean Registry but is not yet available after 2min. 
Please try to redeploy in a few minutes".to_string(), - )); - match result { - Ok(_) => Ok(PushResult { image }), - Err(Operation { .. }) => image_not_reachable, - Err(retry::Error::Internal(_)) => image_not_reachable, - } - } - - pub fn get_image(&self, _image: &Image) -> Option<()> { - todo!() - } - - pub fn delete_image(&self, _image: &Image) -> Result<(), EngineError> { - // TODO(benjaminch): To be implemented later on, but note it must not slow down CI workflow - Ok(()) - } - - pub fn delete_repository(&self) -> Result<(), EngineError> { + pub fn delete_registry(&self) -> Result<(), ContainerRegistryError> { let headers = utilities::get_header_with_bearer(&self.api_key); let res = reqwest::blocking::Client::new() .delete(CR_API_PATH) @@ -177,67 +135,36 @@ impl DOCR { Ok(out) => match out.status() { StatusCode::NO_CONTENT => Ok(()), status => { - warn!("delete status from DO registry API {}", status); - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "Bad status code : {} returned by the DO registry API for deleting DOCR repository", + return Err(ContainerRegistryError::CannotDeleteRegistry { + registry_name: "default".to_string(), + raw_error_message: format!( + "Bad status code: `{}` returned by the DO registry API for deleting DOCR.", status, ), - )); + }); } }, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!("No response from the Digital Ocean API : {:?}", e), - )); + return Err(ContainerRegistryError::CannotDeleteRegistry { + registry_name: "default".to_string(), + raw_error_message: format!("No response from the Digital Ocean API, error: {}", e), + }); } } } - pub fn exec_docr_login(&self) -> Result<(), EngineError> { + pub fn exec_docr_login(&self) -> Result<(), ContainerRegistryError> { let mut cmd = QoveryCommand::new( "doctl", - &vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], - &vec![], + &["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()], + 
&[], ); match cmd.exec() { Ok(_) => Ok(()), - Err(_) => Err(self.engine_error( - EngineErrorCause::User( - "Your DOCR account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to DOCR {}", self.name_with_id()), - )), + Err(_) => Err(ContainerRegistryError::InvalidCredentials), } } - - fn pull_image(&self, registry_name: String, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), vec![], dest.clone()) { - Ok(_) => { - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), - )), - } - } -} - -impl ToTransmitter for DOCR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } } impl ContainerRegistry for DOCR { @@ -257,40 +184,30 @@ impl ContainerRegistry for DOCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn registry_info(&self) -> &ContainerRegistryInfo { + &self.registry_info + } + + fn create_registry(&self) -> Result<(), ContainerRegistryError> { + // Digital Ocean only allow one registry per account... 
+ if get_current_registry_name(self.api_key.as_str()).is_err() { + let _ = self.create_registry(self.name())?; + } + Ok(()) } - fn on_create(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { + fn create_repository(&self, _repository_name: &str) -> Result<(), ContainerRegistryError> { + // Nothing to do, DO only allow one registry and create repository on the flight when image are pushed Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { - let registry_name = match self.get_registry_name(image) { - Ok(registry_name) => registry_name, - Err(err) => { - warn!("{:?}", err); - return false; - } - }; - let headers = utilities::get_header_with_bearer(self.api_key.as_str()); let url = format!( "https://api.digitalocean.com/v2/registry/{}/repositories/{}/tags", - registry_name, - image.name.as_str() + image.registry_name, + image.name_without_repository() ); let res = reqwest::blocking::Client::new() @@ -302,20 +219,10 @@ impl ContainerRegistry for DOCR { Ok(output) => match output.status() { StatusCode::OK => output.text(), _ => { - error!( - "While tyring to get all tags for image: {}, maybe this image not exist !", - &image.name - ); - return false; } }, Err(_) => { - error!( - "While trying to communicate with DigitalOcean API to retrieve all tags for image {}", - &image.name - ); - return false; } }; @@ -333,139 +240,12 @@ impl ContainerRegistry for DOCR { false } - Err(_) => { - error!( - "Unable to deserialize tags from DigitalOcean API for image {}", - &image.tag - ); - - false - } + Err(_) => false, } } - _ => { - error!( - "while retrieving tags for image {} Unable to get output from DigitalOcean API", - &image.name - ); - - false - } + _ => false, } } - - fn pull(&self, image: &Image) -> Result { - let listeners_helper = ListenersHelper::new(&self.listeners); 
- - if !self.does_image_exists(image) { - let info_message = format!("image {:?} does not exist in DOCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from DOCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docr_login()?; - - let registry_name = self.get_registry_name(image)?; - - let dest = format!( - "registry.digitalocean.com/{}/{}", - registry_name.as_str(), - image.name_with_tag() - ); - - // pull image - self.pull_image(registry_name, dest, image) - } - - // https://www.digitalocean.com/docs/images/container-registry/how-to/use-registry-docker-kubernetes/ - fn push(&self, image: &Image, force_push: bool) -> Result { - let registry_name = self.get_registry_name(image)?; - - match self.create_repository(image) { - Ok(_) => info!("DOCR {} has been created", registry_name.as_str()), - Err(_) => warn!("DOCR {} already exists", registry_name.as_str()), - }; - - let _ = self.exec_docr_login()?; - - let dest = format!( - "registry.digitalocean.com/{}/{}", - registry_name.as_str(), - image.name_with_tag() - ); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {:?} found on DOCR {} repository, container build is not required", - image, - registry_name.as_str() - ); - - info!("{}", info_message.as_str()); - - 
listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_name = Some(registry_name.clone()); - // on DOCR registry secret is the same as registry name - image.registry_secret = Some(registry_name); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image {:?} does not exist on DOCR {} repository, starting image upload", - image, registry_name - ); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - self.push_image(registry_name, dest, image) - } - - fn push_error(&self, image: &Image) -> Result { - Ok(PushResult { image: image.clone() }) - } } impl Listen for DOCR { @@ -478,7 +258,10 @@ impl Listen for DOCR { } } -pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: &str) -> Result<(), SimpleError> { +pub fn subscribe_kube_cluster_to_container_registry( + api_key: &str, + cluster_uuid: &str, +) -> Result<(), ContainerRegistryError> { let headers = utilities::get_header_with_bearer(api_key); let cluster_ids = DoApiSubscribeToKubeCluster { cluster_uuids: vec![cluster_uuid.to_string()], @@ -496,31 +279,28 @@ pub fn subscribe_kube_cluster_to_container_registry(api_key: &str, cluster_uuid: match res { Ok(output) => match output.status() { StatusCode::NO_CONTENT => Ok(()), - status => { - warn!("status from DO registry API {}", status); - Err(SimpleError::new(SimpleErrorKind::Other, Some("Incorrect Status received from Digital Ocean when tyring to subscribe repository to cluster"))) - } + status => Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: 
cluster_uuid.to_string(), + raw_error_message: format!("Incorrect Status `{}` received from Digital Ocean when tyring to subscribe repository to cluster", status), + }), }, - Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to call Digital Ocean when tyring to subscribe repository to cluster"), - )) - } + Err(e) => Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: cluster_uuid.to_string(), + raw_error_message: format!("Unable to call Digital Ocean when tyring to subscribe repository to cluster, error: {}", e), + }), } } - Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to Serialize digital ocean cluster uuids"), - )) - } + Err(e) => Err(ContainerRegistryError::CannotLinkRegistryToCluster { + registry_name: "default".to_string(), + cluster_id: cluster_uuid.to_string(), + raw_error_message: format!("Unable to Serialize digital ocean cluster uuids, error: {}", e), + }), }; } -pub fn get_current_registry_name(api_key: &str) -> Result { +pub fn get_current_registry_name(api_key: &str) -> Result { let headers = utilities::get_header_with_bearer(api_key); let res = reqwest::blocking::Client::new() .get(CR_API_PATH) @@ -535,30 +315,30 @@ pub fn get_current_registry_name(api_key: &str) -> Result { match res_registry { Ok(registry) => Ok(registry.registry.name), - Err(err) => Err(SimpleError::new( - SimpleErrorKind::Other, - Some(format!( - "An error occurred while deserializing JSON coming from Digital Ocean API: error: {:?}", + Err(err) => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Seems there is no registry set (DO has only one registry), error: {}.", err - )), - )), + ), + }), } } - status => { - warn!("status from Digital Ocean Registry API {}", status); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Incorrect Status 
received from Digital Ocean when tyring to get container registry"), - )) - } + status => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Incorrect status `{}` received from Digital Ocean when tyring to get container registry.", + status + ), + }), }, - Err(e) => { - error!("{:?}", e); - Err(SimpleError::new( - SimpleErrorKind::Other, - Some("Unable to call Digital Ocean when tyring to fetch the container registry name"), - )) - } + Err(e) => Err(ContainerRegistryError::RegistryDoesntExist { + registry_name: "default".to_string(), + raw_error_message: format!( + "Unable to call Digital Ocean when tyring to fetch the container registry name, error: {}.", + e, + ), + }), }; } diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index 9867b5de..83363bdb 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -1,3 +1,5 @@ +#![allow(clippy::field_reassign_with_default)] + use std::str::FromStr; use rusoto_core::{Client, HttpClient, Region, RusotoError}; @@ -9,20 +11,19 @@ use rusoto_ecr::{ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient}; use crate::build_platform::Image; -use crate::cmd::command::QoveryCommand; -use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image}; -use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; -use crate::error::{EngineError, EngineErrorCause}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; -use crate::models::{ +use crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; +use crate::events::{EngineEvent, EventMessage, GeneralStep, Stage}; +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; use 
crate::runtime::block_on; use retry::delay::Fixed; use retry::Error::Operation; use retry::OperationResult; use serde_json::json; +use url::Url; pub struct ECR { context: Context, @@ -31,7 +32,9 @@ pub struct ECR { access_key_id: String, secret_access_key: String, region: Region, + registry_info: Option, listeners: Listeners, + logger: Box, } impl ECR { @@ -42,25 +45,65 @@ impl ECR { access_key_id: &str, secret_access_key: &str, region: &str, - ) -> Self { - ECR { + listener: Listener, + logger: Box, + ) -> Result { + let mut cr = ECR { context, id: id.to_string(), name: name.to_string(), access_key_id: access_key_id.to_string(), secret_access_key: secret_access_key.to_string(), region: Region::from_str(region).unwrap(), - listeners: vec![], - } + registry_info: None, + listeners: vec![listener], + logger, + }; + + let credentials = cr.get_credentials()?; + let mut registry_url = Url::parse(credentials.endpoint_url.as_str()).unwrap(); + let _ = registry_url.set_username(&credentials.access_token); + let _ = registry_url.set_password(Some(&credentials.password)); + + cr.log_info(format!("🔓 Login to ECR registry {}", credentials.endpoint_url)); + let _ = cr + .context + .docker + .login(®istry_url) + .map_err(|_err| ContainerRegistryError::InvalidCredentials)?; + + let registry_info = ContainerRegistryInfo { + endpoint: registry_url, + registry_name: cr.name.to_string(), + registry_docker_json_config: None, + get_image_name: Box::new(|img_name| img_name.to_string()), + get_repository_name: Box::new(|imag_name| imag_name.to_string()), + }; + + cr.registry_info = Some(registry_info); + cr.is_credentials_valid()?; + Ok(cr) + } + + pub fn log_info(&self, msg: String) { + self.logger.log(EngineEvent::Info( + self.get_event_details(Stage::General(GeneralStep::ValidateSystemRequirements)), + EventMessage::new_from_safe(msg.clone()), + )); + + let lh = ListenersHelper::new(&self.listeners); + lh.deployment_in_progress(ProgressInfo::new( + ProgressScope::Environment { + id: 
self.context.execution_id().to_string(), + }, + ProgressLevel::Info, + Some(msg), + self.context.execution_id(), + )); } pub fn credentials(&self) -> StaticProvider { - StaticProvider::new( - self.access_key_id.to_string(), - self.secret_access_key.to_string(), - None, - None, - ) + StaticProvider::new(self.access_key_id.to_string(), self.secret_access_key.to_string(), None, None) } pub fn client(&self) -> Client { @@ -71,9 +114,9 @@ impl ECR { EcrClient::new_with_client(self.client(), self.region.clone()) } - fn get_repository(&self, image: &Image) -> Option { + fn get_repository(&self, repository_name: &str) -> Option { let mut drr = DescribeRepositoriesRequest::default(); - drr.repository_names = Some(vec![image.name.to_string()]); + drr.repository_names = Some(vec![repository_name.to_string()]); let r = block_on(self.ecr_client().describe_repositories(drr)); @@ -89,7 +132,7 @@ impl ECR { fn get_image(&self, image: &Image) -> Option { let mut dir = DescribeImagesRequest::default(); - dir.repository_name = image.name.to_string(); + dir.repository_name = image.name(); let mut image_identifier = ImageIdentifier::default(); image_identifier.image_tag = Some(image.tag.to_string()); @@ -107,54 +150,7 @@ impl ECR { } } - fn docker_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())], - None => vec![], - } - } - - fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { - // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-push-ecr-image.html - // docker tag e9ae3c220b23 aws_account_id.dkr.ecr.region.amazonaws.com/my-web-app - - match docker_tag_and_push_image(self.kind(), self.docker_envs(), &image, dest.clone(), dest_latest_tag) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PushResult { image }) - } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - 
.unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )), - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - // READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html - // docker pull aws_account_id.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest - - match docker_pull_image(self.kind(), self.docker_envs(), dest.clone()) { - Ok(_) => { - let mut image = image.clone(); - image.registry_url = Some(dest); - Ok(PullResult::Some(image)) - } - Err(e) => Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), - )), - } - } - - fn create_repository(&self, image: &Image) -> Result { - let repository_name = image.name.as_str(); - info!("creating ECR repository {}", &repository_name); - - let mut repo_creation_counter = 0; + fn create_repository(&self, repository_name: &str) -> Result { let container_registry_request = DescribeRepositoriesRequest { repository_names: Some(vec![repository_name.to_string()]), ..Default::default() @@ -167,53 +163,52 @@ impl ECR { // ensure repository is created // need to do all this checks and retry because of several issues encountered like: 200 API response code while repo is not created let repo_created = retry::retry(Fixed::from_millis(5000).take(24), || { - match block_on( + let repositories = block_on( self.ecr_client() .describe_repositories(container_registry_request.clone()), - ) { - Ok(x) => { - debug!("created {:?} repository", x); - OperationResult::Ok(()) - } - Err(e) => { - match e { - RusotoError::Service(s) => match s { - DescribeRepositoriesError::RepositoryNotFound(_) => { - if repo_creation_counter != 0 { - warn!( - "repository {} was not found, {}x retrying...", - &repository_name, &repo_creation_counter - ); - } - repo_creation_counter += 1; - } - _ => warn!("{:?}", s), - }, - _ => warn!("{:?}", e), + ); + match repositories { + // Repo already exist, so 
ok + Ok(_) => OperationResult::Ok(()), + + // Repo does not exist, so creating it + Err(RusotoError::Service(DescribeRepositoriesError::RepositoryNotFound(_))) => { + if let Err(err) = block_on(self.ecr_client().create_repository(crr.clone())) { + OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + })) + } else { + // The Repo should be created at this point, but we want to verify that + // the describe/list return it now. we want to reloop so return a retry instead of a ok + OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: "Retry to check repository exist".to_string(), + })) } - - let msg = match block_on(self.ecr_client().create_repository(crr.clone())) { - Ok(_) => format!("repository {} created", &repository_name), - Err(err) => format!( - "can't create ECR repository {} for {}. {:?}", - &repository_name, - self.name_with_id(), - err - ), - }; - - OperationResult::Retry(Err(self.engine_error(EngineErrorCause::Internal, msg))) } + + // Unknown error, so retries ¯\_(ツ)_/¯ + Err(err) => OperationResult::Retry(Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + })), } }); match repo_created { - Ok(_) => info!( - "repository {} created after {} attempt(s)", - &repository_name, repo_creation_counter - ), + Ok(_) => {} Err(Operation { error, .. 
}) => return error, - Err(retry::Error::Internal(e)) => return Err(self.engine_error(EngineErrorCause::Internal, e)), + Err(retry::Error::Internal(e)) => { + return Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: e, + }) + } }; // apply retention policy @@ -240,45 +235,34 @@ impl ECR { }); let plp = PutLifecyclePolicyRequest { - repository_name: image.name.clone(), + repository_name: repository_name.to_string(), lifecycle_policy_text: lifecycle_policy_text.to_string(), ..Default::default() }; match block_on(self.ecr_client().put_lifecycle_policy(plp)) { - Err(err) => { - error!( - "can't set lifecycle policy to ECR repository {} for {}: {}", - image.name.as_str(), - self.name_with_id(), - err - ); - - Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "can't set lifecycle policy to ECR repository {} for {}", - image.name.as_str(), - self.name_with_id() - ), - )) - } - _ => Ok(self.get_repository(image).expect("cannot get repository")), + Err(err) => Err(ContainerRegistryError::CannotSetRepositoryLifecyclePolicy { + registry_name: self.name.to_string(), + repository_name: repository_name.to_string(), + raw_error_message: err.to_string(), + }), + _ => Ok(self.get_repository(repository_name).expect("cannot get repository")), } } - fn get_or_create_repository(&self, image: &Image) -> Result { + fn get_or_create_repository(&self, repository_name: &str) -> Result { + self.log_info(format!("🗂️ Provisioning container repository {}", repository_name)); + // check if the repository already exists - let repository = self.get_repository(image); - if repository.is_some() { - info!("ECR repository {} already exists", image.name.as_str()); - return Ok(repository.unwrap()); + let repository = self.get_repository(repository_name); + if let Some(repo) = repository { + return Ok(repo); } - self.create_repository(image) + 
self.create_repository(repository_name) } - fn get_credentials(&self) -> Result { + fn get_credentials(&self) -> Result { let r = block_on( self.ecr_client() .get_authorization_token(GetAuthorizationTokenRequest::default()), @@ -302,62 +286,25 @@ impl ECR { ) } None => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to retrieve credentials and endpoint URL from ECR {}", - self.name_with_id(), - ), - )); + return Err(ContainerRegistryError::CannotGetCredentials); } }, _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to retrieve credentials and endpoint URL from ECR {}", - self.name_with_id(), - ), - )); + return Err(ContainerRegistryError::CannotGetCredentials); } }; Ok(ECRCredentials::new(access_token, password, endpoint_url)) } - fn exec_docker_login(&self) -> Result<(), EngineError> { - let credentials = self.get_credentials()?; + fn is_credentials_valid(&self) -> Result<(), ContainerRegistryError> { + let client = StsClient::new_with_client(self.client(), Region::default()); + let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); - let mut cmd = QoveryCommand::new( - "docker", - &vec![ - "login", - "-u", - credentials.access_token.as_str(), - "-p", - credentials.password.as_str(), - credentials.endpoint_url.as_str(), - ], - &self.docker_envs(), - ); - - if let Err(_) = cmd.exec() { - return Err(self.engine_error( - EngineErrorCause::User( - "Your ECR account seems to be no longer valid (bad Credentials). 
\ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to ECR {}", self.name_with_id()), - )); - }; - - Ok(()) - } -} - -impl ToTransmitter for ECR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) + match s { + Ok(_) => Ok(()), + Err(_) => Err(ContainerRegistryError::InvalidCredentials), + } } } @@ -378,168 +325,24 @@ impl ContainerRegistry for ECR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { - let client = StsClient::new_with_client(self.client(), Region::default()); - let s = block_on(client.get_caller_identity(GetCallerIdentityRequest::default())); - - match s { - Ok(_) => Ok(()), - Err(_) => Err(NewEngineError::new_client_invalid_cloud_provider_credentials( - self.get_event_details(), - )), - } + fn registry_info(&self) -> &ContainerRegistryInfo { + // At this point the registry info should be initialize, so unwrap is safe + self.registry_info.as_ref().unwrap() } - fn on_create(&self) -> Result<(), EngineError> { - info!("ECR.on_create() called"); + fn create_registry(&self) -> Result<(), ContainerRegistryError> { + // Nothing to do, ECR require to create only repository Ok(()) } - fn on_create_error(&self) -> Result<(), EngineError> { - unimplemented!() - } - - fn on_delete(&self) -> Result<(), EngineError> { - unimplemented!() - } - - fn on_delete_error(&self) -> Result<(), EngineError> { - unimplemented!() + fn create_repository(&self, name: &str) -> Result<(), ContainerRegistryError> { + let _ = self.get_or_create_repository(name)?; + Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { self.get_image(image).is_some() } - - fn pull(&self, image: &Image) -> Result { - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !self.does_image_exists(image) { - let info_message = format!("image {:?} does not exist in ECR {} repository", image, self.name()); - info!("{}", 
info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from ECR {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login()?; - - let repository = match self.get_or_create_repository(image) { - Ok(r) => r, - _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to create ECR repository for {} with image {:?}", - self.name_with_id(), - image, - ), - )); - } - }; - - let dest = format!("{}:{}", repository.repository_uri.unwrap(), image.tag.as_str()); - - // pull image - self.pull_image(dest, image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let _ = self.exec_docker_login()?; - - let repository = match if force_push { - self.create_repository(image) - } else { - self.get_or_create_repository(image) - } { - Ok(r) => r, - _ => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "failed to create ECR repository for {} with image {:?}", - self.name_with_id(), - image, - ), - )); - } - }; - - let repository_uri = repository.repository_uri.unwrap(); - let dest = format!("{}:{}", repository_uri, image.tag.as_str()); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {:?} found on ECR {} repository, container build is not required", - image, - self.name() - ); - - info!("{}", 
info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let mut image = image.clone(); - image.registry_url = Some(dest); - - return Ok(PushResult { image }); - } - - let info_message = format!( - "image {:?} does not exist on ECR {} repository, starting image upload", - image, - self.name() - ); - - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}:latest", repository_uri); - self.push_image(dest, dest_latest_tag, image) - } - - fn push_error(&self, image: &Image) -> Result { - // TODO change this - Ok(PushResult { image: image.clone() }) - } } impl Listen for ECR { diff --git a/src/container_registry/errors.rs b/src/container_registry/errors.rs new file mode 100644 index 00000000..830e9f32 --- /dev/null +++ b/src/container_registry/errors.rs @@ -0,0 +1,68 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum ContainerRegistryError { + #[error("Invalid credentials error.")] + InvalidCredentials, + #[error("Cannot get credentials error.")] + CannotGetCredentials, + #[error("Cannot create registry error for `{registry_name:?}`: {raw_error_message:?}.")] + CannotCreateRegistry { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot delete registry error for `{registry_name:?}`: {raw_error_message:?}.")] + CannotDeleteRegistry { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot delete image `{image_name:?}` error from repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotDeleteImage { + registry_name: String, + repository_name: 
String, + image_name: String, + raw_error_message: String, + }, + #[error("Image `{image_name:?}` doesn't exist in repository `{repository_name:?}` in registry `{registry_name:?}` error.")] + ImageDoesntExistInRegistry { + registry_name: String, + repository_name: String, + image_name: String, + }, + #[error("Repository `{repository_name:?}` doesn't exist in registry `{registry_name:?}` error.")] + RepositoryDoesntExistInRegistry { + registry_name: String, + repository_name: String, + }, + #[error("Registry `{registry_name:?}` doesn't exist, error: {raw_error_message:?}.")] + RegistryDoesntExist { + registry_name: String, + raw_error_message: String, + }, + #[error("Cannot link registry `{registry_name:?}` to cluster `{cluster_id:?}`: {raw_error_message:?}.")] + CannotLinkRegistryToCluster { + registry_name: String, + cluster_id: String, + raw_error_message: String, + }, + #[error("Cannot create repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotCreateRepository { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, + #[error( + "Cannot delete repository `{repository_name:?}` from registry `{registry_name:?}`: {raw_error_message:?}." 
+ )] + CannotDeleteRepository { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, + #[error("Cannot set lifecycle policy for repository `{repository_name:?}` in registry `{registry_name:?}`: {raw_error_message:?}.")] + CannotSetRepositoryLifecyclePolicy { + registry_name: String, + repository_name: String, + raw_error_message: String, + }, +} diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 7a9bdea6..28f10a5b 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -1,18 +1,18 @@ use serde::{Deserialize, Serialize}; +use url::Url; use crate::build_platform::Image; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; -use crate::models::{Context, Listen, QoveryIdentifier}; +use crate::container_registry::errors::ContainerRegistryError; +use crate::errors::EngineError; +use crate::events::{EventDetails, Stage, Transmitter}; +use crate::io_models::{Context, Listen, QoveryIdentifier}; -pub mod docker; -pub mod docker_hub; pub mod docr; pub mod ecr; +pub mod errors; pub mod scaleway_container_registry; -pub trait ContainerRegistry: Listen + ToTransmitter { +pub trait ContainerRegistry: Listen { fn context(&self) -> &Context; fn kind(&self) -> Kind; fn id(&self) -> &str; @@ -20,53 +20,60 @@ pub trait ContainerRegistry: Listen + ToTransmitter { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), NewEngineError>; - fn on_create(&self) -> Result<(), EngineError>; - fn on_create_error(&self) -> Result<(), EngineError>; - fn on_delete(&self) -> Result<(), EngineError>; - fn on_delete_error(&self) -> Result<(), EngineError>; + + // Get info for this registry, url endpoint with login/password, image name convention, ... 
+ fn registry_info(&self) -> &ContainerRegistryInfo; + + // Some providers require a specific action in order to allow container registry + // For now it is only DigitalOcean, which requires 2 steps to have registries + fn create_registry(&self) -> Result<(), ContainerRegistryError>; + + // Call to create a specific repository in the registry + // i.e: docker.io/erebe or docker.io/qovery + // All providers require an action for that + // The convention for us is that we create one per application + fn create_repository(&self, repository_name: &str) -> Result<(), ContainerRegistryError>; + + // Check on the registry if a specific image already exists fn does_image_exists(&self, image: &Image) -> bool; - fn pull(&self, image: &Image) -> Result; - fn push(&self, image: &Image, force_push: bool) -> Result; - fn push_error(&self, image: &Image) -> Result; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } - fn get_event_details(&self) -> EventDetails { + + fn get_event_details(&self, stage: Stage) -> EventDetails { let context = self.context(); - EventDetails::new( + let ev = EventDetails::new( None, QoveryIdentifier::from(context.organization_id().to_string()), QoveryIdentifier::from(context.cluster_id().to_string()), QoveryIdentifier::from(context.execution_id().to_string()), None, - Stage::Environment(EnvironmentStep::Build), - self.to_transmitter(), - ) + stage, + Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()), + ); + + ev } } -pub struct PushResult { - pub image: Image, +pub fn to_engine_error(event_details: EventDetails, err: ContainerRegistryError) -> EngineError { + EngineError::new_container_registry_error(event_details, err) } -pub enum PullResult {
- Some(Image), - None, +pub struct ContainerRegistryInfo { + pub endpoint: Url, // Contains username and password if necessary + pub registry_name: String, + pub registry_docker_json_config: Option, + // Give it the name of your image, and it returns the full name with prefix if needed + // i.e: for DigitalOcean => registry_name/image_name + // i.e: for Scaleway => image_name/image_name + // i.e: for AWS => image_name + pub get_image_name: Box String>, + + // Give it the name of your image, and it returns the name of the repository that will be used + pub get_repository_name: Box String>, } #[derive(Serialize, Deserialize, Clone, Copy, Debug)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { - DockerHub, Ecr, Docr, ScalewayCr, diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 5102a145..3382b07b 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -1,33 +1,23 @@ extern crate scaleway_api_rs; -use crate::cloud_provider::scaleway::application::ScwZone; - use self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; -use crate::container_registry::docker::{ - docker_login, docker_manifest_inspect, docker_pull_image, docker_tag_and_push_image, -}; -use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult}; -use crate::error::{EngineError, EngineErrorCause}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{ToTransmitter, Transmitter}; -use crate::models::{ - Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, -}; +use crate::cmd::docker; +use crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use 
crate::models::scaleway::ScwZone; use crate::runtime::block_on; -use retry::delay::Fibonacci; -use retry::Error::Operation; -use retry::OperationResult; -use rusoto_core::param::ToParam; +use url::Url; pub struct ScalewayCR { context: Context, id: String, name: String, default_project_id: String, - login: String, secret_token: String, zone: ScwZone, + registry_info: ContainerRegistryInfo, listeners: Listeners, } @@ -39,17 +29,44 @@ impl ScalewayCR { secret_token: &str, default_project_id: &str, zone: ScwZone, - ) -> ScalewayCR { - ScalewayCR { + listener: Listener, + ) -> Result { + // Be sure we are logged on the registry + let login = "nologin".to_string(); + let secret_token = secret_token.to_string(); + + let mut registry = Url::parse(&format!("https://rg.{}.scw.cloud", zone.region())).unwrap(); + let _ = registry.set_username(&login); + let _ = registry.set_password(Some(&secret_token)); + + if context.docker.login(®istry).is_err() { + return Err(ContainerRegistryError::InvalidCredentials); + } + + let registry_info = ContainerRegistryInfo { + endpoint: registry, + registry_name: name.to_string(), + registry_docker_json_config: Some(Self::get_docker_json_config_raw( + &login, + &secret_token, + zone.region().as_str(), + )), + get_image_name: Box::new(move |img_name| format!("{}/{}", img_name, img_name)), + get_repository_name: Box::new(|img_name| img_name.to_string()), + }; + + let cr = ScalewayCR { context, id: id.to_string(), name: name.to_string(), default_project_id: default_project_id.to_string(), - login: "nologin".to_string(), - secret_token: secret_token.to_string(), + secret_token, zone, - listeners: Vec::new(), - } + registry_info, + listeners: vec![listener], + }; + + Ok(cr) } fn get_configuration(&self) -> scaleway_api_rs::apis::configuration::Configuration { @@ -62,16 +79,9 @@ impl ScalewayCR { } } - fn get_docker_envs(&self) -> Vec<(&str, &str)> { - match self.context.docker_tcp_socket() { - Some(tcp_socket) => vec![("DOCKER_HOST", 
tcp_socket.as_str())], - None => vec![], - } - } - pub fn get_registry_namespace( &self, - image: &Image, + namespace_name: &str, ) -> Option { // https://developers.scaleway.com/en/products/registry/api/#get-09e004 let scaleway_registry_namespaces = match block_on(scaleway_api_rs::apis::namespaces_api::list_namespaces( @@ -82,25 +92,17 @@ impl ScalewayCR { None, None, Some(self.default_project_id.as_str()), - image.registry_name.as_deref(), + Some(namespace_name), )) { Ok(res) => res.namespaces, - Err(e) => { - error!( - "Error while interacting with Scaleway API (list_namespaces), error: {}, image: {}", - e, &image.name - ); + Err(_e) => { return None; } }; // We consider every registry namespace names are unique if let Some(registries) = scaleway_registry_namespaces { - if let Some(registry) = registries - .into_iter() - .filter(|r| r.status == Some(Status::Ready)) - .next() - { + if let Some(registry) = registries.into_iter().find(|r| r.status == Some(Status::Ready)) { return Some(registry); } } @@ -117,16 +119,12 @@ impl ScalewayCR { None, None, None, - Some(image.name.as_str()), + Some(image.name().as_str()), None, Some(self.default_project_id.as_str()), )) { Ok(res) => res.images, - Err(e) => { - error!( - "Error while interacting with Scaleway API (list_images), error: {}, image: {}", - e, &image.name - ); + Err(_e) => { return None; } }; @@ -144,14 +142,18 @@ impl ScalewayCR { None } - pub fn delete_image(&self, image: &Image) -> Result { + pub fn delete_image( + &self, + image: &Image, + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#delete-67dbf7 let image_to_delete = self.get_image(image); if image_to_delete.is_none() { - let message = format!("While tyring to delete image {}, image doesn't exist", &image.name,); - error!("{}", message); - - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ContainerRegistryError::ImageDoesntExistInRegistry { + registry_name: self.name.to_string(), + 
repository_name: image.registry_name.to_string(), + image_name: image.name.to_string(), + }); } let image_to_delete = image_to_delete.unwrap(); @@ -162,78 +164,25 @@ impl ScalewayCR { image_to_delete.id.unwrap().as_str(), )) { Ok(res) => Ok(res), - Err(e) => { - let message = format!( - "Error while interacting with Scaleway API (delete_image), error: {}, image: {}", - e, &image.name - ); - - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ContainerRegistryError::CannotDeleteImage { + registry_name: self.name.to_string(), + repository_name: image.registry_name.to_string(), + image_name: image.name.to_string(), + raw_error_message: e.to_string(), + }), } } - fn push_image(&self, dest: String, dest_latest_tag: String, image: &Image) -> Result { - // https://www.scaleway.com/en/docs/deploy-an-image-from-registry-to-kubernetes-kapsule/ - match docker_tag_and_push_image(self.kind(), self.get_docker_envs(), &image, dest, dest_latest_tag) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker push".to_string()), - )) - } - }; - - let result = retry::retry(Fibonacci::from_millis(10000).take(10), || { - match self.does_image_exists(image) { - true => OperationResult::Ok(&image), - false => { - warn!("image is not yet available on Scaleway Registry Namespace, retrying in a few seconds..."); - OperationResult::Retry(()) - } - } - }); - - let image_not_reachable = Err(self.engine_error( - EngineErrorCause::Internal, - "image has been pushed on Scaleway Registry Namespace but is not yet available after 4min. Please try to redeploy in a few minutes".to_string(), - )); - - match result { - Ok(_) => Ok(PushResult { image: image.clone() }), - Err(Operation { .. 
}) => image_not_reachable, - Err(retry::Error::Internal(_)) => image_not_reachable, - } - } - - fn pull_image(&self, dest: String, image: &Image) -> Result { - match docker_pull_image(self.kind(), self.get_docker_envs(), dest) { - Ok(_) => {} - Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - e.message - .unwrap_or_else(|| "unknown error occurring during docker pull".to_string()), - )) - } - }; - - Ok(PullResult::Some(image.clone())) - } - pub fn create_registry_namespace( &self, - image: &Image, - ) -> Result { + namespace_name: &str, + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#post-7a8fcc match block_on(scaleway_api_rs::apis::namespaces_api::create_namespace( &self.get_configuration(), self.zone.region().to_string().as_str(), scaleway_api_rs::models::inline_object_29::InlineObject29 { - name: image.name.clone(), + name: namespace_name.to_string(), description: None, project_id: Some(self.default_project_id.clone()), is_public: Some(false), @@ -241,32 +190,25 @@ impl ScalewayCR { }, )) { Ok(res) => Ok(res), - Err(e) => { - let message = format!( - "Error while interacting with Scaleway API (create_namespace), error: {}, image: {}", - e, &image.name - ); - - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ContainerRegistryError::CannotCreateRepository { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + raw_error_message: e.to_string(), + }), } } pub fn delete_registry_namespace( &self, - image: &Image, - ) -> Result { + namespace_name: &str, + ) -> Result { // https://developers.scaleway.com/en/products/registry/api/#delete-c1ac9b - let registry_to_delete = self.get_registry_namespace(image); + let registry_to_delete = self.get_registry_namespace(namespace_name); if registry_to_delete.is_none() { - let message = format!( - "While tyring to delete registry namespace for image {}, registry namespace doesn't 
exist", - &image.name, - ); - error!("{}", message); - - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ContainerRegistryError::RepositoryDoesntExistInRegistry { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + }); } let registry_to_delete = registry_to_delete.unwrap(); @@ -277,70 +219,39 @@ impl ScalewayCR { registry_to_delete.id.unwrap().as_str(), )) { Ok(res) => Ok(res), - Err(e) => { - let message = format!( - "Error while interacting with Scaleway API (delete_namespace), error: {}, image: {}", - e, &image.name - ); - - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ContainerRegistryError::CannotDeleteRepository { + registry_name: self.name.to_string(), + repository_name: namespace_name.to_string(), + raw_error_message: e.to_string(), + }), } } pub fn get_or_create_registry_namespace( &self, - image: &Image, - ) -> Result { + namespace_name: &str, + ) -> Result { + info!("Get/Create repository for {}", namespace_name); + // check if the repository already exists - let registry_namespace = self.get_registry_namespace(&image); + let registry_namespace = self.get_registry_namespace(namespace_name); if let Some(namespace) = registry_namespace { - info!("Scaleway registry namespace {} already exists", image.name.as_str()); return Ok(namespace); } - self.create_registry_namespace(image) + self.create_registry_namespace(namespace_name) } - fn get_docker_json_config_raw(&self) -> String { + fn get_docker_json_config_raw(login: &str, secret_token: &str, region: &str) -> String { base64::encode( format!( r#"{{"auths":{{"rg.{}.scw.cloud":{{"auth":"{}"}}}}}}"#, - self.zone.region().as_str(), - base64::encode(format!("nologin:{}", self.secret_token).as_bytes()) + region, + base64::encode(format!("{}:{}", login, secret_token).as_bytes()) ) .as_bytes(), ) } - - fn exec_docker_login(&self, registry_url: &String) -> Result<(), EngineError> { - if 
docker_login( - Kind::ScalewayCr, - self.get_docker_envs(), - self.login.clone(), - self.secret_token.clone(), - registry_url.clone(), - ) - .is_err() - { - return Err(self.engine_error( - EngineErrorCause::User( - "Your Scaleway account seems to be no longer valid (bad Credentials). \ - Please contact your Organization administrator to fix or change the Credentials.", - ), - format!("failed to login to Scaleway {}", self.name_with_id()), - )); - }; - - Ok(()) - } -} - -impl ToTransmitter for ScalewayCR { - fn to_transmitter(&self) -> Transmitter { - Transmitter::ContainerRegistry(self.id().to_string(), self.name().to_string()) - } } impl ContainerRegistry for ScalewayCR { @@ -360,195 +271,31 @@ impl ContainerRegistry for ScalewayCR { self.name.as_str() } - fn is_valid(&self) -> Result<(), NewEngineError> { + fn registry_info(&self) -> &ContainerRegistryInfo { + &self.registry_info + } + + fn create_registry(&self) -> Result<(), ContainerRegistryError> { + // Nothing to do, scaleway managed container registry per repository (aka `namespace` by the scw naming convention) Ok(()) } - fn on_create(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_create_error(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn on_delete_error(&self) -> Result<(), EngineError> { + fn create_repository(&self, name: &str) -> Result<(), ContainerRegistryError> { + let _ = self.get_or_create_registry_namespace(name)?; Ok(()) } fn does_image_exists(&self, image: &Image) -> bool { - let registry_url = image - .registry_url - .as_ref() - .unwrap_or(&"undefined".to_string()) - .to_param(); - - if let Err(_) = docker_login( - Kind::ScalewayCr, - self.get_docker_envs(), - self.login.clone(), - self.secret_token.clone(), - registry_url.clone(), - ) { - return false; + let image = docker::ContainerImage { + registry: self.registry_info.endpoint.clone(), + name: image.name(), + tags: vec![image.tag.clone()], + }; + match 
self.context.docker.does_image_exist_remotely(&image) { + Ok(true) => true, + Ok(false) => false, + Err(_) => false, } - - docker_manifest_inspect( - Kind::ScalewayCr, - self.get_docker_envs(), - image.name.clone(), - image.tag.clone(), - registry_url, - ) - .is_some() - } - - fn pull(&self, image: &Image) -> Result { - let listeners_helper = ListenersHelper::new(&self.listeners); - - let mut image = image.clone(); - let registry_url: String; - - match self.get_or_create_registry_namespace(&image) { - Ok(registry) => { - info!( - "Scaleway registry namespace for {} has been created", - image.name.as_str() - ); - image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name - image.registry_url = registry.endpoint.clone(); - image.registry_secret = Some(self.secret_token.clone()); - image.registry_docker_json_config = Some(self.get_docker_json_config_raw()); - registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string()); - } - Err(e) => { - error!( - "Scaleway registry namespace for {} cannot be created, error: {:?}", - image.name.as_str(), - e - ); - return Err(e); - } - } - - if !self.does_image_exists(&image) { - let info_message = format!("image {:?} does not exist in SCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PullResult::None); - } - - let info_message = format!("pull image {:?} from SCR {} repository", image, self.name()); - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let _ = self.exec_docker_login(®istry_url)?; - - 
let dest = format!("{}/{}", registry_url, image.name_with_tag()); - - // pull image - self.pull_image(dest, &image) - } - - fn push(&self, image: &Image, force_push: bool) -> Result { - let mut image = image.clone(); - let registry_url: String; - let registry_name: String; - - match self.get_or_create_registry_namespace(&image) { - Ok(registry) => { - info!( - "Scaleway registry namespace for {} has been created", - image.name.as_str() - ); - image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name - image.registry_url = registry.endpoint.clone(); - image.registry_secret = Some(self.secret_token.clone()); - image.registry_docker_json_config = Some(self.get_docker_json_config_raw()); - registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string()); - registry_name = registry.name.unwrap(); - } - Err(e) => { - error!( - "Scaleway registry namespace for {} cannot be created, error: {:?}", - image.name.as_str(), - e - ); - return Err(e); - } - } - - let _ = self.exec_docker_login(®istry_url)?; - - let dest = format!("{}/{}", registry_url, image.name_with_tag()); - - let listeners_helper = ListenersHelper::new(&self.listeners); - - if !force_push && self.does_image_exists(&image) { - // check if image does exist - if yes, do not upload it again - let info_message = format!( - "image {} found on Scaleway {} repository, container build is not required", - image, registry_name, - ); - - info!("{}", info_message.as_str()); - - listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - return Ok(PushResult { image: image.clone() }); - } - - let info_message = format!( - "image {} does not exist on Scaleway {} repository, starting image upload", - image, - self.name() - ); - - info!("{}", info_message.as_str()); - - 
listeners_helper.deployment_in_progress(ProgressInfo::new( - ProgressScope::Application { - id: image.application_id.clone(), - }, - ProgressLevel::Info, - Some(info_message), - self.context.execution_id(), - )); - - let dest_latest_tag = format!("{}/{}:latest", registry_url, image.name); - self.push_image(dest, dest_latest_tag, &image) - } - - fn push_error(&self, image: &Image) -> Result { - Ok(PushResult { image: image.clone() }) } } diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 134c7b7a..2ed36465 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -1,9 +1,8 @@ use std::net::Ipv4Addr; +use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::{DnsProvider, Kind}; -use crate::errors::EngineError; -use crate::events::{ToTransmitter, Transmitter}; -use crate::models::{Context, Domain}; +use crate::io_models::{Context, Domain}; pub struct Cloudflare { context: Context, @@ -71,19 +70,11 @@ impl DnsProvider for Cloudflare { vec![Ipv4Addr::new(1, 1, 1, 1), Ipv4Addr::new(1, 0, 0, 1)] } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), DnsProviderError> { if self.cloudflare_api_token.is_empty() || self.cloudflare_email.is_empty() { - Err(EngineError::new_client_invalid_cloud_provider_credentials( - self.get_event_details(), - )) + Err(DnsProviderError::InvalidCredentials) } else { Ok(()) } } } - -impl ToTransmitter for Cloudflare { - fn to_transmitter(&self) -> Transmitter { - Transmitter::DnsProvider(self.id().to_string(), self.name().to_string()) - } -} diff --git a/src/dns_provider/errors.rs b/src/dns_provider/errors.rs new file mode 100644 index 00000000..8d97bedd --- /dev/null +++ b/src/dns_provider/errors.rs @@ -0,0 +1,7 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum DnsProviderError { + #[error("Invalid credentials error.")] + InvalidCredentials, +} diff --git a/src/dns_provider/mod.rs b/src/dns_provider/mod.rs index 
7d140468..bba32a2d 100644 --- a/src/dns_provider/mod.rs +++ b/src/dns_provider/mod.rs @@ -1,15 +1,14 @@ use std::net::Ipv4Addr; +use crate::dns_provider::errors::DnsProviderError; use serde::{Deserialize, Serialize}; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; -use crate::errors::EngineError as NewEngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; -use crate::models::{Context, Domain, QoveryIdentifier}; +use crate::io_models::{Context, Domain}; pub mod cloudflare; +pub mod errors; -pub trait DnsProvider: ToTransmitter { +pub trait DnsProvider { fn context(&self) -> &Context; fn provider_name(&self) -> &str; fn kind(&self) -> Kind; @@ -22,30 +21,7 @@ pub trait DnsProvider: ToTransmitter { fn token(&self) -> &str; fn domain(&self) -> &Domain; fn resolvers(&self) -> Vec; - fn is_valid(&self) -> Result<(), NewEngineError>; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::DnsProvider(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } - fn get_event_details(&self) -> EventDetails { - let context = self.context(); - EventDetails::new( - None, - QoveryIdentifier::from(context.organization_id().to_string()), - QoveryIdentifier::from(context.cluster_id().to_string()), - QoveryIdentifier::from(context.execution_id().to_string()), - None, - Stage::Environment(EnvironmentStep::Deploy), - self.to_transmitter(), - ) - } + fn is_valid(&self) -> Result<(), DnsProviderError>; } #[derive(Serialize, Deserialize, Clone, Debug)] diff --git a/src/engine.rs b/src/engine.rs index 2284d672..3e296c2e 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -1,13 +1,30 @@ use std::borrow::Borrow; use std::sync::Arc; +use thiserror::Error; use crate::build_platform::BuildPlatform; use 
crate::cloud_provider::kubernetes::Kubernetes; use crate::cloud_provider::CloudProvider; +use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::ContainerRegistry; +use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::DnsProvider; use crate::errors::EngineError; -use crate::models::Context; +use crate::io_models::Context; + +#[derive(Error, Debug, PartialEq)] +pub enum EngineConfigError { + #[error("Build platform is not valid error: {0}")] + BuildPlatformNotValid(EngineError), + #[error("Container registry is not valid error: {0}")] + ContainerRegistryNotValid(ContainerRegistryError), + #[error("Cloud provider is not valid error: {0}")] + CloudProviderNotValid(EngineError), + #[error("DNS provider is not valid error: {0}")] + DnsProviderNotValid(DnsProviderError), + #[error("Kubernetes is not valid error: {0}")] + KubernetesNotValid(EngineError), +} pub struct EngineConfig { context: Context, @@ -61,11 +78,14 @@ impl EngineConfig { (*self.dns_provider).borrow() } - pub fn is_valid(&self) -> Result<(), EngineError> { - self.build_platform.is_valid()?; - self.container_registry.is_valid()?; - self.cloud_provider.is_valid()?; - self.dns_provider.is_valid()?; + pub fn is_valid(&self) -> Result<(), EngineConfigError> { + if let Err(e) = self.cloud_provider.is_valid() { + return Err(EngineConfigError::CloudProviderNotValid(e)); + } + + if let Err(e) = self.dns_provider.is_valid() { + return Err(EngineConfigError::DnsProviderNotValid(e)); + } Ok(()) } diff --git a/src/error.rs b/src/error.rs index a64601ab..0cc266dd 100644 --- a/src/error.rs +++ b/src/error.rs @@ -110,20 +110,17 @@ pub fn cast_simple_error_to_engine_error>( match input { Err(simple_error) => { let message = match simple_error.kind { - SimpleErrorKind::Command(exit_status) => format!( - "{} ({})", - simple_error.message.unwrap_or("".into()), - exit_status - ), - SimpleErrorKind::Other => simple_error.message.unwrap_or("".into()), + 
SimpleErrorKind::Command(exit_status) => { + format!( + "{} ({})", + simple_error.message.unwrap_or_else(|| "".into()), + exit_status + ) + } + SimpleErrorKind::Other => simple_error.message.unwrap_or_else(|| "".into()), }; - Err(EngineError::new( - EngineErrorCause::Internal, - scope, - execution_id, - Some(message), - )) + Err(EngineError::new(EngineErrorCause::Internal, scope, execution_id, Some(message))) } Ok(x) => Ok(x), } diff --git a/src/errors/io.rs b/src/errors/io.rs index 0cebdd46..42501a16 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -12,7 +12,7 @@ pub struct CommandError { impl From for CommandError { fn from(error: errors::CommandError) -> Self { CommandError { - message: error.message_safe.unwrap_or("".to_string()), + message: error.message_safe.unwrap_or_default(), message_unsafe: error.message_raw, } } @@ -28,6 +28,7 @@ pub enum Tag { CannotGetWorkspaceDirectory, UnsupportedInstanceType, CannotRetrieveClusterConfigFile, + CannotCreateFile, CannotGetClusterNodes, NotEnoughResourcesToDeployEnvironment, CannotUninstallHelmChart, @@ -74,6 +75,7 @@ pub enum Tag { UnsupportedVersion, CannotGetSupportedVersions, CannotGetCluster, + ContainerRegistryError, ObjectStorageCannotCreateBucket, ObjectStorageCannotPutFileIntoBucket, NoClusterFound, @@ -88,6 +90,7 @@ pub enum Tag { CloudProviderClientInvalidCredentials, VersionNumberParsingError, NotImplementedError, + BuilderError, BuilderDockerCannotFindAnyDockerfile, BuilderDockerCannotReadDockerfile, BuilderDockerCannotExtractEnvVarsFromDockerfile, @@ -96,8 +99,22 @@ pub enum Tag { BuilderBuildpackCannotBuildContainerImage, BuilderGetBuildError, BuilderCloningRepositoryError, + DockerError, DockerPushImageError, DockerPullImageError, + BuilderDockerCannotListImages, + ContainerRegistryRepositoryCreationError, + ContainerRegistryRepositorySetLifecycleError, + ContainerRegistryGetCredentialsError, + ContainerRegistryImageDoesntExist, + ContainerRegistryImageUnreachableAfterPush, + 
ContainerRegistryRepositoryDoesntExist, + ContainerRegistryDeleteRepositoryError, + ContainerRegistryDeleteImageError, + ObjectStorageInvalidBucketName, + ObjectStorageCannotEmptyBucket, + ObjectStorageCannotTagBucket, + ObjectStorageCannotActivateBucketVersioning, } impl From for Tag { @@ -106,6 +123,7 @@ impl From for Tag { errors::Tag::Unknown => Tag::Unknown, errors::Tag::UnsupportedInstanceType => Tag::UnsupportedInstanceType, errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile, + errors::Tag::CannotCreateFile => Tag::CannotCreateFile, errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes, errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment, errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable, @@ -186,6 +204,26 @@ impl From for Tag { errors::Tag::BuilderCloningRepositoryError => Tag::BuilderCloningRepositoryError, errors::Tag::DockerPushImageError => Tag::DockerPushImageError, errors::Tag::DockerPullImageError => Tag::DockerPullImageError, + errors::Tag::ContainerRegistryRepositoryCreationError => Tag::ContainerRegistryRepositoryCreationError, + errors::Tag::ContainerRegistryRepositorySetLifecycleError => { + Tag::ContainerRegistryRepositorySetLifecycleError + } + errors::Tag::ContainerRegistryGetCredentialsError => Tag::ContainerRegistryGetCredentialsError, + errors::Tag::ContainerRegistryDeleteImageError => Tag::ContainerRegistryDeleteImageError, + errors::Tag::ContainerRegistryImageDoesntExist => Tag::ContainerRegistryImageDoesntExist, + errors::Tag::ContainerRegistryImageUnreachableAfterPush => Tag::ContainerRegistryImageUnreachableAfterPush, + errors::Tag::ContainerRegistryRepositoryDoesntExist => Tag::ContainerRegistryRepositoryDoesntExist, + errors::Tag::ContainerRegistryDeleteRepositoryError => Tag::ContainerRegistryDeleteRepositoryError, + errors::Tag::BuilderDockerCannotListImages => Tag::BuilderDockerCannotListImages, + 
errors::Tag::DockerError => Tag::DockerError, + errors::Tag::ObjectStorageInvalidBucketName => Tag::ObjectStorageInvalidBucketName, + errors::Tag::ObjectStorageCannotEmptyBucket => Tag::ObjectStorageCannotEmptyBucket, + errors::Tag::ObjectStorageCannotTagBucket => Tag::ObjectStorageCannotTagBucket, + errors::Tag::ObjectStorageCannotActivateBucketVersioning => { + Tag::ObjectStorageCannotActivateBucketVersioning + } + errors::Tag::BuilderError => Tag::BuilderError, + errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError, } } } @@ -209,10 +247,7 @@ impl From for EngineError { event_details: EventDetails::from(error.event_details), qovery_log_message: error.qovery_log_message, user_log_message: error.user_log_message, - message: match error.message { - Some(msg) => Some(CommandError::from(msg)), - None => None, - }, + message: error.message.map(CommandError::from), link: error.link.map(|url| url.to_string()), hint_message: error.hint_message, } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index b3b1a6a8..22184db2 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -2,15 +2,22 @@ pub mod io; extern crate url; +use crate::build_platform::BuildError; use crate::cloud_provider::utilities::VersionsNumber; +use crate::cmd; +use crate::cmd::docker::DockerError; use crate::cmd::helm::HelmError; +use crate::container_registry::errors::ContainerRegistryError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; +use crate::object_storage::errors::ObjectStorageError; +use std::fmt::{Display, Formatter}; +use thiserror::Error; use url::Url; /// CommandError: command error, mostly returned by third party tools. 
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Error, PartialEq)] pub struct CommandError { /// message: full error message, can contains unsafe text such as passwords and tokens. message_raw: String, @@ -55,6 +62,17 @@ impl CommandError { } } + /// Creates a new CommandError from legacy command error. + pub fn new_from_legacy_command_error( + legacy_command_error: cmd::command::CommandError, + safe_message: Option, + ) -> Self { + CommandError { + message_raw: legacy_command_error.to_string(), + message_safe: safe_message, + } + } + /// Create a new CommandError from a CMD command. pub fn new_from_command_line( message: String, @@ -66,8 +84,8 @@ impl CommandError { ) -> Self { let mut unsafe_message = format!( "{}\ncommand: {} {}\nenv: {}", - message.to_string(), - bin.to_string(), + message, + bin, cmd_args.join(" "), envs.iter() .map(|(k, v)| format!("{}={}", k, v)) @@ -86,6 +104,24 @@ impl CommandError { } } +impl Display for CommandError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(self.message().as_str()) + } +} + +impl From for CommandError { + fn from(object_storage_error: ObjectStorageError) -> Self { + CommandError::new_from_safe_message(object_storage_error.to_string()) + } +} + +impl From for CommandError { + fn from(container_registry_error: ContainerRegistryError) -> Self { + CommandError::new_from_safe_message(container_registry_error.to_string()) + } +} + #[derive(Clone, Debug, PartialEq)] /// Tag: unique identifier for an error. pub enum Tag { @@ -107,6 +143,8 @@ pub enum Tag { UnsupportedZone, /// CannotRetrieveKubernetesConfigFile: represents an error while trying to retrieve Kubernetes config file. CannotRetrieveClusterConfigFile, + /// CannotCreateFile: represents an error while trying to create a file. + CannotCreateFile, /// CannotGetClusterNodes: represents an error while trying to get cluster's nodes. 
CannotGetClusterNodes, /// NotEnoughResourcesToDeployEnvironment: represents an error when trying to deploy an environment but there are not enough resources available on the cluster. @@ -223,6 +261,8 @@ pub enum Tag { NotImplementedError, /// TaskCancellationRequested: represents an error where current task cancellation has been requested. TaskCancellationRequested, + /// BuildError: represents an error when trying to build an application. + BuilderError, /// BuilderDockerCannotFindAnyDockerfile: represents an error when trying to get a Dockerfile. BuilderDockerCannotFindAnyDockerfile, /// BuilderDockerCannotReadDockerfile: represents an error while trying to read Dockerfile. @@ -231,6 +271,8 @@ pub enum Tag { BuilderDockerCannotExtractEnvVarsFromDockerfile, /// BuilderDockerCannotBuildContainerImage: represents an error while trying to build Docker container image. BuilderDockerCannotBuildContainerImage, + /// BuilderDockerCannotListImages: represents an error while trying to list docker images. + BuilderDockerCannotListImages, /// BuilderBuildpackInvalidLanguageFormat: represents an error where buildback requested language has wrong format. BuilderBuildpackInvalidLanguageFormat, /// BuilderBuildpackCannotBuildContainerImage: represents an error while trying to build container image with Buildpack. @@ -239,13 +281,41 @@ pub enum Tag { BuilderGetBuildError, /// BuilderCloningRepositoryError: represents an error when builder is trying to clone a git repository. BuilderCloningRepositoryError, + /// DockerError: represents an error when trying to use docker cli. + DockerError, /// DockerPushImageError: represents an error when trying to push a docker image. DockerPushImageError, /// DockerPullImageError: represents an error when trying to pull a docker image. DockerPullImageError, + /// ContainerRegistryError: represents an error when trying to interact with a repository. 
+ ContainerRegistryError, + /// ContainerRegistryRepositoryCreationError: represents an error when trying to create a repository. + ContainerRegistryRepositoryCreationError, + /// ContainerRegistryRepositorySetLifecycleError: represents an error when trying to set repository lifecycle policy. + ContainerRegistryRepositorySetLifecycleError, + /// ContainerRegistryGetCredentialsError: represents an error when trying to get container registry credentials. + ContainerRegistryGetCredentialsError, + /// ContainerRegistryDeleteImageError: represents an error while trying to delete an image. + ContainerRegistryDeleteImageError, + /// ContainerRegistryImageDoesntExist: represents an error, image doesn't exist in the registry. + ContainerRegistryImageDoesntExist, + /// ContainerRegistryImageUnreachableAfterPush: represents an error when image has been pushed but is unreachable. + ContainerRegistryImageUnreachableAfterPush, + /// ContainerRegistryRepositoryDoesntExist: represents an error, repository doesn't exist. + ContainerRegistryRepositoryDoesntExist, + /// ContainerRegistryDeleteRepositoryError: represents an error while trying to delete a repository. + ContainerRegistryDeleteRepositoryError, + /// ObjectStorageInvalidBucketName: represents an error, bucket name is not valid. + ObjectStorageInvalidBucketName, + /// ObjectStorageCannotEmptyBucket: represents an error while trying to empty an object storage bucket. + ObjectStorageCannotEmptyBucket, + /// ObjectStorageCannotTagBucket: represents an error while trying to tag an object storage bucket. + ObjectStorageCannotTagBucket, + /// ObjectStorageCannotActivateBucketVersioning: represents an error while trying to activate bucket versioning for bucket. + ObjectStorageCannotActivateBucketVersioning, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] /// EngineError: represents an engine error. Engine will always returns such errors carrying context infos easing monitoring and debugging. 
pub struct EngineError { /// tag: error unique identifier @@ -337,14 +407,14 @@ impl EngineError { /// Creates new engine error from legacy engine error easing migration. pub fn new_from_legacy_engine_error(e: LegacyEngineError) -> Self { - let message = e.message.unwrap_or("".to_string()); + let message = e.message.unwrap_or_default(); EngineError { tag: Tag::Unknown, event_details: EventDetails::new( None, - QoveryIdentifier::new("".to_string()), - QoveryIdentifier::new("".to_string()), - QoveryIdentifier::new(e.execution_id.to_string()), + QoveryIdentifier::new_from_long_id("".to_string()), + QoveryIdentifier::new_from_long_id("".to_string()), + QoveryIdentifier::new_from_long_id(e.execution_id.to_string()), None, Stage::General(GeneralStep::UnderMigration), match e.scope { @@ -362,7 +432,7 @@ impl EngineError { }, ), qovery_log_message: message.to_string(), - user_log_message: message.to_string(), + user_log_message: message, message: None, link: None, hint_message: None, @@ -424,7 +494,7 @@ impl EngineError { event_details, Tag::MissingRequiredEnvVariable, message.to_string(), - message.to_string(), + message, None, None, None, @@ -593,7 +663,7 @@ impl EngineError { event_details: EventDetails, error_message: CommandError, ) -> EngineError { - let message = "Cannot retrieve Kubernetes instance type is not supported"; + let message = "Cannot retrieve Kubernetes kubeconfig"; EngineError::new( event_details, Tag::CannotRetrieveClusterConfigFile, @@ -605,6 +675,25 @@ impl EngineError { ) } + /// Creates new error for file we can't create. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error_message`: Raw error message. 
+ pub fn new_cannot_create_file(event_details: EventDetails, error_message: CommandError) -> EngineError { + let message = "Cannot create file"; + EngineError::new( + event_details, + Tag::CannotCreateFile, + message.to_string(), + message.to_string(), + Some(error_message), + None, + None, + ) + } + /// Creates new error for Kubernetes cannot get nodes. /// /// Arguments: @@ -643,10 +732,7 @@ impl EngineError { let mut message = vec!["There is not enough resources on the cluster:".to_string()]; if requested_cpu > free_cpu { - message.push(format!( - "{} CPU requested and only {} CPU available", - free_cpu, requested_cpu - )); + message.push(format!("{} CPU requested and only {} CPU available", free_cpu, requested_cpu)); } if requested_ram_in_mib > free_ram_in_mib { @@ -690,7 +776,7 @@ impl EngineError { event_details, Tag::NotEnoughResourcesToDeployEnvironment, message.to_string(), - message.to_string(), + message, None, None, Some("Consider to add one more node or upgrade your nodes configuration. 
If not possible, pause or delete unused environments.".to_string()), @@ -720,7 +806,7 @@ impl EngineError { event_details, Tag::CannotUninstallHelmChart, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -760,16 +846,13 @@ impl EngineError { event_details: EventDetails, kubernetes_raw_version: String, ) -> EngineError { - let message = format!( - "Unable to determine Kubernetes master version: `{}`", - kubernetes_raw_version, - ); + let message = format!("Unable to determine Kubernetes master version: `{}`", kubernetes_raw_version,); EngineError::new( event_details, Tag::CannotDetermineK8sMasterVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -797,7 +880,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sRequestedUpgradeVersion, message.to_string(), - message.to_string(), + message, error_message, None, None, @@ -814,16 +897,13 @@ impl EngineError { event_details: EventDetails, kubelet_worker_raw_version: String, ) -> EngineError { - let message = format!( - "Unable to determine Kubelet worker version: `{}`", - kubelet_worker_raw_version, - ); + let message = format!("Unable to determine Kubelet worker version: `{}`", kubelet_worker_raw_version,); EngineError::new( event_details, Tag::CannotDetermineK8sKubeletWorkerVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -846,7 +926,7 @@ impl EngineError { event_details, Tag::CannotDetermineK8sKubeProxyVersion, message.to_string(), - message.to_string(), + message, None, None, None, @@ -883,16 +963,13 @@ impl EngineError { /// * `event_details`: Error linked event details. /// * `pod_name`: Pod name having PDB in an invalid state. 
pub fn new_k8s_pod_disruption_budget_invalid_state(event_details: EventDetails, pod_name: String) -> EngineError { - let message = format!( - "Unable to upgrade Kubernetes, pdb for app `{}` in invalid state.", - pod_name, - ); + let message = format!("Unable to upgrade Kubernetes, pdb for app `{}` in invalid state.", pod_name,); EngineError::new( event_details, Tag::K8sPodDisruptionBudgetInInvalidState, message.to_string(), - message.to_string(), + message, None, None, None, @@ -940,7 +1017,7 @@ impl EngineError { event_details, Tag::K8sCannotDeletePod, message.to_string(), - message.to_string(), + message, Some(raw_k8s_error), None, None, @@ -1013,7 +1090,7 @@ impl EngineError { event_details, Tag::K8sUpgradeDeployedVsRequestedVersionsInconsistency, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1045,7 +1122,7 @@ impl EngineError { event_details, Tag::K8sScaleReplicas, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1118,7 +1195,7 @@ impl EngineError { event_details, Tag::K8sGetLogs, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1143,7 +1220,7 @@ impl EngineError { event_details, Tag::K8sGetLogs, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1173,7 +1250,7 @@ impl EngineError { event_details, Tag::K8sDescribe, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1194,7 +1271,7 @@ impl EngineError { event_details, Tag::K8sHistory, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1219,7 +1296,7 @@ impl EngineError { event_details, Tag::K8sCannotCreateNamespace, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1249,7 +1326,7 @@ impl EngineError { event_details, Tag::K8sPodIsNotReady, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1268,16 +1345,13 @@ impl 
EngineError { requested_version: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error, node is not ready with the requested version `{}`.", - requested_version - ); + let message = format!("Error, node is not ready with the requested version `{}`.", requested_version); EngineError::new( event_details, Tag::K8sNodeIsNotReadyWithTheRequestedVersion, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1327,7 +1401,7 @@ impl EngineError { event_details, Tag::K8sValidateRequiredCPUandBurstableError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, Some("Please ensure your configuration is valid.".to_string()), @@ -1347,7 +1421,7 @@ impl EngineError { event_details, Tag::CannotFindRequiredBinary, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1375,7 +1449,7 @@ impl EngineError { event_details, Tag::SubnetsCountShouldBeEven, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1400,7 +1474,7 @@ impl EngineError { event_details, Tag::CannotGetOrCreateIamRole, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1421,16 +1495,13 @@ impl EngineError { to_dir: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to copy all files from `{}` to `{}`.", - from_dir, to_dir - ); + let message = format!("Error while trying to copy all files from `{}` to `{}`.", from_dir, to_dir); EngineError::new( event_details, Tag::CannotCopyFilesFromDirectoryToDirectory, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1477,7 +1548,7 @@ impl EngineError { event_details, Tag::TerraformCannotRemoveEntryOut, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1575,7 +1646,7 @@ impl EngineError { event_details, Tag::TerraformContextUnsupportedParameterValue, message.to_string(), - 
message.to_string(), + message, raw_error, None, None, @@ -1648,6 +1719,42 @@ impl EngineError { /// /// Arguments: /// /// * `event_details`: Error linked event details. /// * `error`: Raw error message. + pub fn new_container_registry_error(event_details: EventDetails, error: ContainerRegistryError) -> EngineError { + EngineError::new( + event_details, + Tag::ContainerRegistryError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + + /// Creates new error from a Build error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message. + pub fn new_build_error(event_details: EventDetails, error: BuildError) -> EngineError { + EngineError::new( + event_details, + Tag::BuilderError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + + /// Creates new error from a Helm error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message.
pub fn new_helm_error(event_details: EventDetails, error: HelmError) -> EngineError { let cmd_error = match &error { HelmError::CmdError(_, _, cmd_error) => Some(cmd_error.clone()), @@ -1677,13 +1784,13 @@ impl EngineError { helm_chart: String, raw_error: CommandError, ) -> EngineError { - let message = format!("Error while uninstalling helm chart: `{}`.", helm_chart.to_string()); + let message = format!("Error while uninstalling helm chart: `{}`.", helm_chart); EngineError::new( event_details, Tag::HelmChartUninstallError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1706,15 +1813,14 @@ impl EngineError { ) -> EngineError { let message = format!( "Error while trying to get helm chart `{}` history in namespace `{}`.", - helm_chart.to_string(), - namespace + helm_chart, namespace ); EngineError::new( event_details, Tag::HelmHistoryError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1753,16 +1859,13 @@ impl EngineError { product_name: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to get supported versions for `{}`.", - product_name.to_string() - ); + let message = format!("Error while trying to get supported versions for `{}`.", product_name); EngineError::new( event_details, Tag::CannotGetSupportedVersions, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -1781,82 +1884,19 @@ impl EngineError { product_name: String, version: String, ) -> EngineError { - let message = format!( - "Error, version `{}` is not supported for `{}`.", - version.to_string(), - product_name.to_string() - ); + let message = format!("Error, version `{}` is not supported for `{}`.", version, product_name); EngineError::new( event_details, Tag::UnsupportedVersion, message.to_string(), - message.to_string(), + message, None, None, None, ) } - /// Creates new object storage cannot create bucket. 
- /// - /// Arguments: - /// - /// * `event_details`: Error linked event details. - /// * `bucket_name`: Object storage bucket name. - /// * `raw_error`: Raw error message. - pub fn new_object_storage_cannot_create_bucket_error( - event_details: EventDetails, - bucket_name: String, - raw_error: CommandError, - ) -> EngineError { - let message = format!( - "Error, cannot create object storage bucket `{}`.", - bucket_name.to_string(), - ); - - EngineError::new( - event_details, - Tag::ObjectStorageCannotCreateBucket, - message.to_string(), - message.to_string(), - Some(raw_error), - None, - None, - ) - } - - /// Creates new object storage cannot put file into bucket. - /// - /// Arguments: - /// - /// * `event_details`: Error linked event details. - /// * `bucket_name`: Object storage bucket name. - /// * `file_name`: File name to be added into the bucket. - /// * `raw_error`: Raw error message. - pub fn new_object_storage_cannot_put_file_into_bucket_error( - event_details: EventDetails, - bucket_name: String, - file_name: String, - raw_error: CommandError, - ) -> EngineError { - let message = format!( - "Error, cannot put file `{}` into object storage bucket `{}`.", - file_name.to_string(), - bucket_name.to_string(), - ); - - EngineError::new( - event_details, - Tag::ObjectStorageCannotPutFileIntoBucket, - message.to_string(), - message.to_string(), - Some(raw_error), - None, - None, - ) - } - /// Creates new error while trying to get cluster. /// /// Arguments: @@ -1896,7 +1936,7 @@ impl EngineError { event_details, Tag::ClientServiceFailedToStart, message.to_string(), - message.to_string(), + message, None, None, Some("Ensure you can run it without issues with `qovery run` and check its logs from the web interface or the CLI with `qovery log`. 
\ @@ -1926,7 +1966,7 @@ impl EngineError { event_details, Tag::ClientServiceFailedToDeployBeforeStart, message.to_string(), - message.to_string(), + message, None, None, None, @@ -1956,7 +1996,7 @@ impl EngineError { event_details, Tag::DatabaseFailedToStartAfterSeveralRetries, message.to_string(), - message.to_string(), + message, raw_error, None, None, @@ -2013,16 +2053,13 @@ impl EngineError { raw_version_number: String, raw_error: CommandError, ) -> EngineError { - let message = format!( - "Error while trying to parse `{}` to a version number.", - raw_version_number - ); + let message = format!("Error while trying to parse `{}` to a version number.", raw_version_number); EngineError::new( event_details, Tag::VersionNumberParsingError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2124,7 +2161,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotFindAnyDockerfile, message.to_string(), - message.to_string(), + message, None, None, Some("Your Dockerfile is not present at the specified location, check your settings.".to_string()), @@ -2141,16 +2178,13 @@ impl EngineError { event_details: EventDetails, requested_language: String, ) -> EngineError { - let message = format!( - "Cannot build: Invalid buildpacks language format: `{}`.", - requested_language - ); + let message = format!("Cannot build: Invalid buildpacks language format: `{}`.", requested_language); EngineError::new( event_details, Tag::BuilderBuildpackInvalidLanguageFormat, message.to_string(), - message.to_string(), + message, None, None, Some("Expected format `builder[@version]`.".to_string()), @@ -2205,7 +2239,7 @@ impl EngineError { event_details, Tag::BuilderGetBuildError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2230,7 +2264,7 @@ impl EngineError { event_details, Tag::BuilderCloningRepositoryError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2256,6 +2290,24 
@@ impl EngineError { ) } + + /// Creates new error from a Docker error + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `error`: Raw error message. + pub fn new_docker_error(event_details: EventDetails, error: DockerError) -> EngineError { + EngineError::new( + event_details, + Tag::DockerError, + error.to_string(), + error.to_string(), + None, + None, + None, + ) + } + /// Creates new error when trying to push a Docker image. /// /// Arguments: @@ -2279,7 +2331,7 @@ impl EngineError { event_details, Tag::DockerPushImageError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2309,7 +2361,7 @@ impl EngineError { event_details, Tag::DockerPullImageError, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2334,7 +2386,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotReadDockerfile, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2359,7 +2411,7 @@ impl EngineError { event_details, Tag::BuilderDockerCannotExtractEnvVarsFromDockerfile, message.to_string(), - message.to_string(), + message, Some(raw_error), None, None, @@ -2384,10 +2436,392 @@ impl EngineError { event_details, Tag::BuilderDockerCannotBuildContainerImage, message.to_string(), - message.to_string(), + message, Some(raw_error), None, Some("It looks like there is something wrong in your Dockerfile. Try building the application locally with `docker build --no-cache`.".to_string()), ) } + + /// Creates new error when trying to create a new container registry namespace. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Container repository name. + /// * `registry_name`: Registry to be created. + /// * `raw_error`: Raw error message.
+ pub fn new_container_registry_namespace_creation_error( + event_details: EventDetails, + repository_name: String, + registry_name: String, + raw_error: ContainerRegistryError, + ) -> EngineError { + let message = format!("Error, trying to create registry `{}` in `{}`.", registry_name, repository_name); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositoryCreationError, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new error when trying to set container repository lifecycle policy. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_repository_set_lifecycle_policy_error( + event_details: EventDetails, + repository_name: String, + raw_error: ContainerRegistryError, + ) -> EngineError { + let message = format!("Error, trying to set lifecycle policy repository `{}`.", repository_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositorySetLifecycleError, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new error when trying to get container registry credentials. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + pub fn new_container_registry_get_credentials_error( + event_details: EventDetails, + repository_name: String, + ) -> EngineError { + let message = format!( + "Failed to retrieve credentials and endpoint URL from container registry `{}`.", + repository_name, + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryGetCredentialsError, + message.to_string(), + message, + None, + None, + None, + ) + } + + /// Creates new error when trying to delete an image. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. 
+ /// * `raw_error`: Raw error message. + pub fn new_container_registry_delete_image_error( + event_details: EventDetails, + image_name: String, + raw_error: ContainerRegistryError, + ) -> EngineError { + let message = format!("Failed to delete image `{}`.", image_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryDeleteImageError, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new error when trying to get image from a registry. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. + pub fn new_container_registry_image_doesnt_exist( + event_details: EventDetails, + image_name: String, + raw_error: ContainerRegistryError, + ) -> EngineError { + let message = format!("Image `{}` doesn't exists.", image_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryImageDoesntExist, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new error when image is unreachable after push. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `image_name`: Image name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_image_unreachable_after_push( + event_details: EventDetails, + image_name: String, + ) -> EngineError { + let message = format!( + "Image `{}` has been pushed on registry namespace but is not yet available after some time.", + image_name, + ); + + EngineError::new( + event_details, + Tag::ContainerRegistryImageUnreachableAfterPush, + message.to_string(), + message, + None, + None, + Some("Please try to redeploy in a few minutes.".to_string()), + ) + } + + /// Creates new error when trying to get image from a registry. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. 
+ pub fn new_container_registry_repository_doesnt_exist( + event_details: EventDetails, + repository_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Repository `{}` doesn't exists.", repository_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryRepositoryDoesntExist, + message.to_string(), + message, + raw_error, + None, + None, + ) + } + + /// Creates new error when trying to delete repository. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `repository_name`: Repository name. + /// * `raw_error`: Raw error message. + pub fn new_container_registry_delete_repository_error( + event_details: EventDetails, + repository_name: String, + raw_error: Option, + ) -> EngineError { + let message = format!("Failed to delete repository `{}`.", repository_name,); + + EngineError::new( + event_details, + Tag::ContainerRegistryDeleteRepositoryError, + message.to_string(), + message, + raw_error, + None, + None, + ) + } + + /// Creates new error when trying to list Docker images. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `raw_error`: Raw error message. + pub fn new_docker_cannot_list_images(event_details: EventDetails, raw_error: CommandError) -> EngineError { + let message = "Error while trying to list docker images."; + + EngineError::new( + event_details, + Tag::BuilderDockerCannotListImages, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + + /// Creates new error, object storage bucket name is not valid. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Errored bucket name. 
+ pub fn new_object_storage_bucket_name_is_invalid(event_details: EventDetails, bucket_name: String) -> EngineError { + let message = format!("Error: bucket name `{}` is not valid.", bucket_name); + + EngineError::new( + event_details, + Tag::ObjectStorageInvalidBucketName, + message.to_string(), + message, + None, + None, + Some("Check your cloud provider documentation to know bucket naming rules.".to_string()), + ) + } + + /// Creates new object storage cannot create bucket. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_create_bucket_error( + event_details: EventDetails, + bucket_name: String, + raw_error: ObjectStorageError, + ) -> EngineError { + let message = format!("Error, cannot create object storage bucket `{}`.", bucket_name,); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotCreateBucket, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new object storage cannot put file into bucket. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `file_name`: File name to be added into the bucket. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_put_file_into_bucket_error( + event_details: EventDetails, + bucket_name: String, + file_name: String, + raw_error: ObjectStorageError, + ) -> EngineError { + let message = format!( + "Error, cannot put file `{}` into object storage bucket `{}`.", + file_name, bucket_name, + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotPutFileIntoBucket, + message.to_string(), + message, + Some(raw_error.into()), + None, + None, + ) + } + + /// Creates new object storage cannot empty object storage bucket. 
+ /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_empty_bucket( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!("Error while trying to empty object storage bucket `{}`.", bucket_name,); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotEmptyBucket, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) + } + + /// Creates new object storage cannot tag bucket error. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. + pub fn new_object_storage_cannot_tag_bucket_error( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!("Error while trying to tag object storage bucket `{}`.", bucket_name,); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotTagBucket, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) + } + + /// Creates new object storage cannot activate bucket versioning error. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `bucket_name`: Object storage bucket name. + /// * `raw_error`: Raw error message. 
+ pub fn new_object_storage_cannot_activate_bucket_versioning_error( + event_details: EventDetails, + bucket_name: String, + raw_error: CommandError, + ) -> EngineError { + let message = format!( + "Error while trying to activate versioning for object storage bucket `{}`.", + bucket_name, + ); + + EngineError::new( + event_details, + Tag::ObjectStorageCannotActivateBucketVersioning, + message.to_string(), + message, + Some(raw_error), + None, + None, + ) + } +} + +impl Display for EngineError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(format!("{:?}", self).as_str()) + } } diff --git a/src/events/io.rs b/src/events/io.rs index c9e21af8..9bf9693c 100644 --- a/src/events/io.rs +++ b/src/events/io.rs @@ -25,41 +25,6 @@ pub enum EngineEvent { error: EngineError, message: Option, }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Waiting { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deploying { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Pausing { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deleting { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deployed { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Paused { - details: EventDetails, - message: EventMessage, - }, - #[deprecated(note = "event status is carried by EventDetails directly")] - Deleted { - details: EventDetails, - message: EventMessage, - }, } impl From for EngineEvent { @@ -79,38 +44,7 @@ impl From for EngineEvent { }, events::EngineEvent::Error(e, m) => EngineEvent::Error { error: EngineError::from(e), - message: 
match m { - Some(msg) => Some(EventMessage::from(msg)), - None => None, - }, - }, - events::EngineEvent::Waiting(d, m) => EngineEvent::Waiting { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deploying(d, m) => EngineEvent::Deploying { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Pausing(d, m) => EngineEvent::Pausing { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deleting(d, m) => EngineEvent::Deleting { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deployed(d, m) => EngineEvent::Deployed { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Paused(d, m) => EngineEvent::Paused { - details: EventDetails::from(d), - message: EventMessage::from(m), - }, - events::EngineEvent::Deleted(d, m) => EngineEvent::Deleted { - details: EventDetails::from(d), - message: EventMessage::from(m), + message: m.map(EventMessage::from), }, } } @@ -308,10 +242,7 @@ pub struct EventDetails { impl From for EventDetails { fn from(details: events::EventDetails) -> Self { - let provider_kind = match details.provider_kind { - Some(kind) => Some(Kind::from(kind)), - None => None, - }; + let provider_kind = details.provider_kind.map(Kind::from); EventDetails { provider_kind, organisation_id: details.organisation_id.to_string(), diff --git a/src/events/mod.rs b/src/events/mod.rs index 659b3d37..deb2eb19 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::field_reassign_with_default)] +#![allow(clippy::large_enum_variant)] #![allow(deprecated)] pub mod io; @@ -6,7 +8,7 @@ extern crate url; use crate::cloud_provider::Kind; use crate::errors::{CommandError, EngineError}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; use std::fmt::{Display, Formatter}; #[derive(Debug, Clone)] @@ -20,29 
+22,6 @@ pub enum EngineEvent { Warning(EventDetails, EventMessage), /// Error: represents an error event. Error(EngineError, Option), - /// Waiting: represents an engine waiting event. - /// - /// Engine is waiting for a task to be done. - #[deprecated(note = "event status is carried by EventDetails directly")] - Waiting(EventDetails, EventMessage), - /// Deploying: represents an engine deploying event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deploying(EventDetails, EventMessage), - /// Pausing: represents an engine pausing event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Pausing(EventDetails, EventMessage), - /// Deleting: represents an engine deleting event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deleting(EventDetails, EventMessage), - /// Deployed: represents an engine deployed event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Deployed(EventDetails, EventMessage), - /// Paused: represents an engine paused event. - #[deprecated(note = "event status is carried by EventDetails directly")] - Paused(EventDetails, EventMessage), - /// Deleted: represents an engine deleted event. 
- #[deprecated(note = "event status is carried by EventDetails directly")] - Deleted(EventDetails, EventMessage), } impl EngineEvent { @@ -53,13 +32,6 @@ impl EngineEvent { EngineEvent::Info(details, _message) => details, EngineEvent::Warning(details, _message) => details, EngineEvent::Error(engine_error, _message) => engine_error.event_details(), - EngineEvent::Waiting(details, _message) => details, - EngineEvent::Deploying(details, _message) => details, - EngineEvent::Pausing(details, _message) => details, - EngineEvent::Deleting(details, _message) => details, - EngineEvent::Deployed(details, _message) => details, - EngineEvent::Paused(details, _message) => details, - EngineEvent::Deleted(details, _message) => details, } } @@ -70,13 +42,6 @@ impl EngineEvent { EngineEvent::Info(_details, message) => message.message(message_verbosity), EngineEvent::Warning(_details, message) => message.message(message_verbosity), EngineEvent::Error(engine_error, _message) => engine_error.message(), - EngineEvent::Waiting(_details, message) => message.message(message_verbosity), - EngineEvent::Deploying(_details, message) => message.message(message_verbosity), - EngineEvent::Pausing(_details, message) => message.message(message_verbosity), - EngineEvent::Deleting(_details, message) => message.message(message_verbosity), - EngineEvent::Deployed(_details, message) => message.message(message_verbosity), - EngineEvent::Paused(_details, message) => message.message(message_verbosity), - EngineEvent::Deleted(_details, message) => message.message(message_verbosity), } } } @@ -132,11 +97,7 @@ impl EventMessage { EventMessageVerbosity::SafeOnly => self.safe_message.to_string(), EventMessageVerbosity::FullDetails => match &self.full_details { None => self.safe_message.to_string(), - Some(details) => format!( - "{} / Full details: {}", - self.safe_message.to_string(), - details.to_string() - ), + Some(details) => format!("{} / Full details: {}", self.safe_message, details), }, } } @@ -154,7 
+115,7 @@ impl Display for EventMessage { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// Stage: represents an engine event stage, can be General, Infrastructure or Environment. pub enum Stage { /// GeneralStep: general stage in the engine, usually used across all stages. @@ -190,7 +151,7 @@ impl Display for Stage { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// GeneralStep: represents an engine general step usually shared across all engine stages pub enum GeneralStep { /// ValidateSystemRequirements: validating system requirements @@ -218,7 +179,7 @@ impl Display for GeneralStep { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// InfrastructureStep: represents an engine infrastructure step. pub enum InfrastructureStep { /// LoadConfiguration: first step in infrastructure, aiming to load all configuration (from Terraform, etc). @@ -255,7 +216,7 @@ impl Display for InfrastructureStep { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// EnvironmentStep: represents an engine environment step. pub enum EnvironmentStep { /// LoadConfiguration: first step in environment, aiming to load all configuration (from Terraform, etc). @@ -309,7 +270,7 @@ type TransmitterName = String; /// TransmitterType: represents a transmitter type. type TransmitterType = String; // TODO(benjaminch): makes it a real enum / type -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// Transmitter: represents the event's source caller (transmitter). pub enum Transmitter { /// BuildPlatform: platform aiming to build applications images. @@ -358,7 +319,7 @@ impl Display for Transmitter { /// Region: represents event's cloud provider region. type Region = String; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] /// EventDetails: represents an event details, carrying all useful data such as Qovery identifiers, transmitter, stage etc. pub struct EventDetails { /// provider_kind: cloud provider name. 
an be set to None if not linked to any provider kind. @@ -411,7 +372,7 @@ impl EventDetails { /// TODO(benjaminch): remove this dirty hack pub fn clone_changing_stage(event_details: EventDetails, stage: Stage) -> Self { - let mut event_details = event_details.clone(); + let mut event_details = event_details; event_details.stage = stage; event_details } @@ -466,18 +427,8 @@ mod tests { EventMessageVerbosity::SafeOnly, "safe".to_string(), ), - ( - "safe".to_string(), - None, - EventMessageVerbosity::SafeOnly, - "safe".to_string(), - ), - ( - "safe".to_string(), - None, - EventMessageVerbosity::FullDetails, - "safe".to_string(), - ), + ("safe".to_string(), None, EventMessageVerbosity::SafeOnly, "safe".to_string()), + ("safe".to_string(), None, EventMessageVerbosity::FullDetails, "safe".to_string()), ( "safe".to_string(), Some("raw".to_string()), @@ -524,30 +475,12 @@ mod tests { Stage::Infrastructure(InfrastructureStep::LoadConfiguration), InfrastructureStep::LoadConfiguration.to_string(), ), - ( - Stage::Environment(EnvironmentStep::Pause), - EnvironmentStep::Pause.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Resume), - EnvironmentStep::Resume.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Build), - EnvironmentStep::Build.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Delete), - EnvironmentStep::Delete.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Update), - EnvironmentStep::Update.to_string(), - ), - ( - Stage::Environment(EnvironmentStep::Deploy), - EnvironmentStep::Deploy.to_string(), - ), + (Stage::Environment(EnvironmentStep::Pause), EnvironmentStep::Pause.to_string()), + (Stage::Environment(EnvironmentStep::Resume), EnvironmentStep::Resume.to_string()), + (Stage::Environment(EnvironmentStep::Build), EnvironmentStep::Build.to_string()), + (Stage::Environment(EnvironmentStep::Delete), EnvironmentStep::Delete.to_string()), + (Stage::Environment(EnvironmentStep::Update), EnvironmentStep::Update.to_string()), + 
(Stage::Environment(EnvironmentStep::Deploy), EnvironmentStep::Deploy.to_string()), ]; for tc in test_cases { diff --git a/src/fs.rs b/src/fs.rs index 1fa9eb8e..7a68a48f 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -107,7 +107,7 @@ pub fn cleanup_workspace_directory(working_root_dir: &str, execution_id: &str) - return match crate::fs::root_workspace_directory(working_root_dir, execution_id) { Ok(workspace_dir) => match std::fs::remove_dir_all(match workspace_dir.strip_suffix("/.") { Some(striped_workspace_dir) => striped_workspace_dir, // Removing extra dir name allowing to delete directory properly ("/dir/." => "dir") - None => workspace_dir.as_str().clone(), + None => &workspace_dir, }) { Ok(_) => Ok(()), Err(err) => { @@ -168,15 +168,11 @@ mod tests { // setup: let execution_id: &str = "123"; let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); - let root_dir = format!( - "{}/.qovery-workspace/{}", - tmp_dir.path().to_str().unwrap(), - execution_id - ); + let root_dir = format!("{}/.qovery-workspace/{}", tmp_dir.path().to_str().unwrap(), execution_id); let root_dir_path = Path::new(root_dir.as_str()); let directories_to_create = vec![ - format!("{}", root_dir), + root_dir.to_string(), format!("{}/.terraform", root_dir), format!("{}/.terraform/dir-1", root_dir), format!("{}/dir-1", root_dir), @@ -215,10 +211,8 @@ mod tests { .collect::>(); // execute: - let result = archive_workspace_directory( - tmp_dir.path().to_str().expect("error getting file path string"), - execution_id, - ); + let result = + archive_workspace_directory(tmp_dir.path().to_str().expect("error getting file path string"), execution_id); // verify: assert_eq!(true, result.is_ok()); @@ -255,7 +249,7 @@ mod tests { } // clean: - tmp_files.into_iter().for_each(|f| drop(f)); + tmp_files.into_iter().for_each(drop); tmp_dir.close().expect("error closing temporary directory"); } } diff --git a/src/git.rs b/src/git.rs index 4222e887..44a50978 100644 --- a/src/git.rs +++ 
b/src/git.rs @@ -11,16 +11,13 @@ use url::Url; // or an error to specify that we have exhausted everything we are able to provide fn authentication_callback<'a>( get_credentials: &'a impl Fn(&str) -> Vec<(CredentialType, Cred)>, -) -> impl FnMut(&str, Option<&str>, CredentialType) -> Result + 'a { +) -> impl FnMut(&str, Option<&str>, CredentialType) -> Result + '_ { let mut current_credentials: (String, Vec<(CredentialType, Cred)>) = ("".into(), vec![]); - return move |remote_url, username_from_url, allowed_types| { + move |remote_url, username_from_url, allowed_types| { // If we have changed remote, reset our available auth methods if remote_url != current_credentials.0 { - current_credentials = ( - remote_url.to_string(), - get_credentials(username_from_url.unwrap_or("git")), - ); + current_credentials = (remote_url.to_string(), get_credentials(username_from_url.unwrap_or("git"))); } let auth_methods = &mut current_credentials.1; @@ -43,7 +40,7 @@ fn authentication_callback<'a>( return Ok(credential); } } - }; + } } fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, Error> { @@ -68,17 +65,14 @@ fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result, } fn clone

( - repository_url: &str, + repository_url: &Url, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, ) -> Result where P: AsRef, { - let url = Url::parse(repository_url) - .map_err(|err| Error::from_str(format!("Invalid repository url {}: {}", repository_url, err).as_str()))?; - - if url.scheme() != "https" { + if repository_url.scheme() != "https" { return Err(Error::from_str("Repository URL have to start with https://")); } @@ -98,11 +92,11 @@ where let _ = std::fs::remove_dir_all(into_dir.as_ref()); } - repo.clone(url.as_str(), into_dir.as_ref()) + repo.clone(repository_url.as_str(), into_dir.as_ref()) } pub fn clone_at_commit

( - repository_url: &str, + repository_url: &Url, commit_id: &str, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, @@ -140,7 +134,7 @@ where } pub fn get_parent_commit_id

( - repository_url: &str, + repository_url: &Url, commit_id: &str, into_dir: P, get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>, @@ -164,6 +158,7 @@ where mod tests { use crate::git::{checkout, clone, clone_at_commit, get_parent_commit_id}; use git2::{Cred, CredentialType}; + use url::Url; use uuid::Uuid; struct DirectoryForTests { @@ -175,7 +170,7 @@ mod tests { /// Since tests are runs in parallel and eventually on the same node, it will avoid having directories collisions between tests running on the same node. pub fn new_with_random_suffix(base_path: String) -> Self { DirectoryForTests { - path: format!("{}_{}", base_path, Uuid::new_v4().to_string()), + path: format!("{}_{}", base_path, Uuid::new_v4()), } } @@ -196,18 +191,26 @@ mod tests { let repo_path = repo_dir.path(); // We only allow https:// at the moment - let repo = clone("git@github.com:Qovery/engine.git", &repo_path, &|_| vec![]); - assert!(matches!(repo, Err(e) if e.message().contains("Invalid repository"))); + let repo = clone( + &Url::parse("ssh://git@github.com/Qovery/engine.git").unwrap(), + &repo_path, + &|_| vec![], + ); + assert!(matches!(repo, Err(e) if e.message().contains("https://"))); // Repository must be empty - let repo = clone("https://github.com/Qovery/engine-testing.git", &repo_path, &|_| vec![]); + let repo = clone( + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), + &repo_path, + &|_| vec![], + ); assert!(repo.is_ok()); // clone makes sure to empty the directory // Working case { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_clone".to_string()); let repo = clone( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![], ); @@ -224,7 +227,7 @@ mod tests { )] }; let repo = clone( - "https://gitlab.com/qovery/q-core.git", + &Url::parse("https://gitlab.com/qovery/q-core.git").unwrap(), clone_dir.path(), &get_credentials, ); 
@@ -261,7 +264,7 @@ mod tests { fn test_git_checkout() { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_checkout".to_string()); let repo = clone( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), clone_dir.path(), &|_| vec![], ) @@ -283,7 +286,7 @@ mod tests { fn test_git_parent_id() { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_parent_id".to_string()); let result = get_parent_commit_id( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "964f02f3a3065bc7f6fb745d679b1ddb21153cc7", clone_dir.path(), &|_| vec![], @@ -299,7 +302,7 @@ mod tests { let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_parent_id_not_existing".to_string()); let result = get_parent_commit_id( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "964f02f3a3065bc7f6fb745d679b1ddb21153cc0", clone_dir.path(), &|_| vec![], @@ -313,7 +316,7 @@ mod tests { fn test_git_submodule_with_ssh_key() { // Unique Key only valid for the submodule and in read access only // https://github.com/Qovery/dumb-logger/settings/keys - let ssh_key = 
String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0NCmIzQmxibk56YUMxclpYa3RkakVBQUFBQUJHNXZibVVBQUFBRWJtOXVaUUFBQUFBQUFBQUJBQUFCbHdBQUFBZHpjMmd0Y24NCk5oQUFBQUF3RUFBUUFBQVlFQTFGcS95ZGF6dU84T3ZRdjVUNEdxbndOMjhZV0EzaXlqanREMFdSQXhtdDZEV3lJRlVYZ1gNClZFZ1ZVYnZyYndKNGJQa0tTbkdqd1hZRUdJYkdYa0hKUTdvWTVSMnB6b1hqUkVYTzIzZEZ2aVp4bUpOcVdEVVJqSHhjc1INCndOYWxiOFVZZVBCRVI4TEQzWWpQd0lYNXdCWm5VSjZLWTJFbXhjSlBVUnV4bUlyTjI4QndiZ3FiejJPU3NJdWg4a1ZwSngNCldheitFc3JNM282NHpHMm0wa0dxMVI1VHE0enBPRWliUk1iY1ZXTldKUzRZR29JczdsRzB0ZHZndktNRnJsWktzSUw1Y2ENCkFOQzRXTlROMm1DVVFrVGpGSDVySDlDa0ZBZjZaZ0lqYklvN0s3TTc0L1B5RVhEcStyRW5vRWdzeEkzRi9NZHMydGM2RWkNClJaY2JrUmRLVnpaUzJCMXdKNDhrOGR3Sml5VytKSWY4ejEzK2FiUXVPNGR5MWRnM2gwbEZ6dm9qaVYxTjNBRXdHcmhjZEUNClo3TXNaeThKM3JvRElZSWZCczdkbmh2T1FrME1taEpKSEpMaVlEZWZCYUk4MVdGTGlqekUxejhqMG90cExlNkt0SVhQYk8NCmV5WWdod0U2aDlhSmNrOEU3WklYMjc4MGRQMW93T2g1dC9VaE0vdjFBQUFGZ082eU9GenVzamhjQUFBQUIzTnphQzF5YzINCkVBQUFHQkFOUmF2OG5XczdqdkRyMEwrVStCcXA4RGR2R0ZnTjRzbzQ3UTlGa1FNWnJlZzFzaUJWRjRGMVJJRlZHNzYyOEMNCmVHejVDa3B4bzhGMkJCaUd4bDVCeVVPNkdPVWRxYzZGNDBSRnp0dDNSYjRtY1ppVGFsZzFFWXg4WExFY0RXcFcvRkdIancNClJFZkN3OTJJejhDRitjQVdaMUNlaW1OaEpzWENUMUVic1ppS3pkdkFjRzRLbTg5amtyQ0xvZkpGYVNjVm1zL2hMS3pONk8NCnVNeHRwdEpCcXRVZVU2dU02VGhJbTBURzNGVmpWaVV1R0JxQ0xPNVJ0TFhiNEx5akJhNVdTckNDK1hHZ0RRdUZqVXpkcGcNCmxFSkU0eFIrYXgvUXBCUUgrbVlDSTJ5S095dXpPK1B6OGhGdzZ2cXhKNkJJTE1TTnhmekhiTnJYT2hJa1dYRzVFWFNsYzINClV0Z2RjQ2VQSlBIY0NZc2x2aVNIL005ZC9tbTBManVIY3RYWU40ZEpSYzc2STRsZFRkd0JNQnE0WEhSR2V6TEdjdkNkNjYNCkF5R0NId2JPM1o0YnprSk5ESm9TU1J5UzRtQTNud1dpUE5WaFM0bzh4TmMvSTlLTGFTM3VpclNGejJ6bnNtSUljQk9vZlcNCmlYSlBCTzJTRjl1L05IVDlhTURvZWJmMUlUUDc5UUFBQUFNQkFBRUFBQUdCQUxhR1pqRkwvV0NwQWtjV0lxM25LMHZRZzQNCjBuamxQcGxKQXVKTWprOVc1RGNpNkQrSVJGTC9BK29TeUcxTit2Qk9uTnliMmhIZnNzd0dxQWRjTVEwcmtISFZ6WitWbk4NCmxVSGFxdW5UQkR4aitPSUhXN0lEczFqSWtEZWZnQngyTmh5eDR3anRBTHBhVW1ja1B1SkhTcURSV3JvQkc1c01Uc3RwWmwNCnNtb0diTmxFK0o1dE9lMnhqYVYzNzdRNVd4L0FIemd0T09RemZNL3lTZjMzTDhCS1Y0a3J4eXV3ZW95T1Q5OU9ia0ltaU
UNCnpTMEQxVERuUStmSTNjdm1aL3lvcDZ0clA0a01wdWtWdC93ZUhFWU5nZkdPdHVHMndwU3oyRmpNcUcyT1NFd3ZpRXM3U0YNCmlwTGNWc2dpUzg3ckI5ZFBRejFYTGhhdW9MTDliY3BlOE9sZW50VkI5VHFaU1lqaTJoeUNtZG5id25CS2QyMGVaUlh0S3QNCnh3SUpDdkpESGwyWk9wTVVUcnIydFcwSkVFZU1QSDJWMCs4amg3aGxlQ0NLcDhmdE1pcGVuWTdvelR1M1JVTUdNcjB4eTINCmhUalVJNkVGU0ppVGlKVE9ibGVhcGVPMVE1czdHaU5ibmdZQXFhN3h3RmJuYllrODJ3ekxPbzdEUjYzODhJbzVQcEFRQUENCkFNQUtXbURSMWU5bXlncm8wZmtQUDQ3dGsxMnF5bWpkQzVtRU1SNm9TOTNMbGRaK1ptKzBxVlBxN1BSQ3JPZlpLcFJSQ1UNCmJOUkM0ZFJhUHk0ek85cEdqdzE3ZlhjUGxGQzRaQUN1anhnRzhvazdYNEdGVlZEQ2lySFRySFhWN0ozNUtPMnR5MloyR2UNCms2L0dhMUpCMlBLN0tJZFlnMWpjY3lUR0FsZTlmcjIyU21nZHVoUmt2WlZsVU9mMHp2ZDhERzlVcktYUURWTERHd1QrWlkNClp2ODhYdGduZzZneU1jZXhZaHZZY04yMUo4ay9wNmM1ZGVuUXNNL0QxN0Qyck9iNE1BQUFEQkFPcDBJWitTVWxXY0xzbjMNCmVwQk1pTVAwdm5LUTI4UUd4NDl1bW14VXdhMTI0djk5YzhtTXZ5TXJPYnFsODdjZjQwWTlqdUhsSGZKSzd0MXhNdE5qU3QNCkJWRlNjU2E5Sk56S0hKRTJaYlJma1d1ZXpScytGbytKcjU0YVppQjNvcjNFeUtaamNZY2RFTG5ROHNjNmJXd25Ic29WSHkNCmNpTThtcUhudHRqeXJPZFdJRi9CTURlYjF5WkliYlQ0aWN3Y1N2TEJOVE95dllwakg1RWNsTXdXcWlsQ2NxVVJyTmtZVXMNCnJWZkFabDZuUmE5N0FNNDd6THhBT0RZT1FzbjZhdk5RQUFBTUVBNTk2ejRYZkxrQ09MT3drUi85NS90WEYzS3p4MjFsdC8NCllBVExmRlBKbHdNaGRxN1d2VG9LZWxNV0QwNUxXYlZxYitNOGU3SWZSQlducEp0V1RxMVBCY3ltT2k1TkprSmZnWWhqdGgNCjlqT1k4WTVCWWlvcENRUUFtTWc3SHF3a0xUSUdUU25IdDN5ZGFTK21TaVFTQUhLb1VKbmp4cEdLQ3ZyVGk5eHdxTFpZT1YNClZvOHFCZ003M1c1TWUyQWI0YnpPaEt4Tm9iTFpqWkxqZDJoeHRyWENJaityRXVRa09NT1hGTmR6NkFDR0hwQ09KTGp4clUNCmk4TGNwd2c5NlpWZkhCQUFBQUNtVnlaV0psUUhOMGVYZz0NCi0tLS0tRU5EIE9QRU5TU0ggUFJJVkFURSBLRVktLS0tLQ==").unwrap()).unwrap(); + let ssh_key = 
String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFNd0FBQUF0emMyZ3RaVwpReU5UVXhPUUFBQUNBTzZlaGNrV0JrNlcwd3lTZ0FIY0dSY3JneW1IVThqRWVKRm5yQ2k1ZjZaQUFBQUpERlV0TVZ4VkxUCkZRQUFBQXR6YzJndFpXUXlOVFV4T1FBQUFDQU82ZWhja1dCazZXMHd5U2dBSGNHUmNyZ3ltSFU4akVlSkZuckNpNWY2WkEKQUFBRUQ0aGwvTmk0aGgvK3oxUm4wdWtMcm5mQ0xrN1BUWmErbVNQYk01ZS9aS0pnN3A2RnlSWUdUcGJUREpLQUFkd1pGeQp1REtZZFR5TVI0a1dlc0tMbC9wa0FBQUFDbVZ5WldKbFFITjBlWGdCQWdNPQotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K").unwrap()).unwrap(); let invalid_ssh_key = String::from_utf8(base64::decode("LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQ21GbGN6STFOaTFqZEhJQUFBQUdZbU55ZVhCMEFBQUFHQUFBQUJCNzZzbWIzVgp5WFB3SE12dm8zWTB5M0FBQUFFQUFBQUFFQUFBR1hBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDOVZHbm13cjZCClRHdWxzODhEaXRXaE5IUUoxMjV0eGxHa2EzNDNxUVB2S3dSc2VxN05SdFAzY2IxbDRMZytzdWozZ0lQYU5yM295SlBoRDIKZmIxbzF1cUFiOStkbWhwQXc4L1lCa05NZkRrdDRTWEpGZjZ3dUZwa1p4SHF3czNZUXF6cjhicVJaaHA0bXlnc2VwNFVHOApBaGxVMG5CUXFBREFhS3dBcmpLeUdBeWwwenRDYVdObm9sOVRZSmZuNEpOQW5YUDFONmMxMUVaRm5wKzJsMTVoSVdNd2NKClpCMnFFeTFSZzFVNXpuOVNSOURIVXhvN2p0ZkkrdWJWbHdnelBQaDVjZzAydVc0K0JwcFg1UGlpZ04rQlBNajc3WEJ0VTQKZzU3MmRDZHBSRjk3NjJ5SDBsY21nSkRqVnhnOTludVVGRDlwVG9nUTRrUENrdUluNmcxS3JObFdqY1R2c1hFS2JVS0xqawpkQkR2Yk1tbzZBaHJXRFhDSjZqRUN0T2Jka29XMGVjTGU4cXB3Nmh5N1NmdWppSm9QbnVsazRWenMwR2xPa3VPU0JIUmhJClhSc25NaFNiNnh2dDl6QldJcklvZDZoWnhuQ0V2SWRESzlacVBnOXJpbXc4bG8rUkFwdm1ySnRINUhsbFJiYWh4K2RUU1cKM2hCa1BlMnNDL1UvRUFBQVdBVXBEOTFIQTAzSnQyNFFSSFVXRDAvVTJGMTBzZE5WN0w4bkhMeVNibFBnSFhMc3lpSTFxOQo0NXBOUEQyNElBakNzQ08rVHREcXc3MDhlNXliUWhXUCsybkxtdGQwclEyTXh3SnZwUjlGcEV6UDFyejRYUDVUbzZDN3N1CmZpd0JPZWd6bjhQT1hGSmRvRk9Ud3E3dWhaM201NE93NHZvZkFKSHdtYWtwTGZMd2R1TnQ3S1RNQkVpT3VlM0ZXTGtCR0wKQUE1RGtoYVlpVGgyajB2YU9jUWhxZVphVEp6V2tidUcvb29DK1cwcTVXcFNZdFlxREFhWEh0bG8rZGtOMFEzZVVhcm1FTQpGcy9tdEpha3dhOVhCMVgzMndKbUpIdmN0OG4vVzA1T0N5V0U1Y2szeitRQVB3a2pGK0hKOGlOZDl
uVk5zckx1T010a2VQCk1aMTZreTg5WUVSZVQ1QXRJU1lRd0JQU2tsTFZKL3VaOCszK2Vyc3JrOW1aakw3ZXpISnV4ZysxUmR1T3BPeWpXMTRoTGYKblJQTDlKOXgvZWZ2MFV0L3BpR3M5NEFRcFFVZnJFdXpjL1dmejRocUtzVUxnT0VnblZBWXpuSksyWHJGeTN4aWlKVkFVUQpZcm4xak9lU1oyTWV0cjJvd05VdVM3cEhGTHZIWURRWklURmxVaFlOYUx0ejV5WU9HTCtFbEVxQm4wT1FFenNESDhROEpFCk5jWGVxUjFRTE4rTUJaMFZqQ2Q3T0ExTGpXZVVrdjNMaFJER3lPS3RjWk5OeFl5MkgwRWlmYzIvRHpLMnlpcVRQWUdMbHYKOWhZTlZZcC8xOGxhUkFOL040MlVDMjRmS0hFZ2lYVTNnL3RCZkZmbEFBWThKSE9sQUJEdXFWYjJkWHZKdXFLeUJMUElqVQo5cVl5VXNOVXhWS2M2ZWh4VU4wcVlnTmV2Z0JmMXVSZkxCY2c3SjVJVDZQQ2dSa3lNenBRakY1RkhuM0J6SVMrb3ZFSnNaCk5LNklYbDJIY3FncExTWUFkTFZlZEZOUzlkVU01blpMdlJEMjkyc0FQWm5aaU91Z3pwSWNrMllFcXpscjc2NXlUakRJdWgKR3kvdFlBQ3FIZHV4S2pMdGc0OXpjZjdNN2xESGNuVEY1MlJsazEyR2x1emZGK1dhZDF3eUFKVnNyUmtqVFZYVHhnTEV6MQo4SzF0WUtVOWoyc3grUE1Vd0JxM3lQR2lTaEgydWp6em82SUc1cnVYSTAwZXVkT2t1NVVrSHhBVnJneUI1S0M2VFRMR1BYCnhQMFN5Zk12dXJycDdvMnhsK2dkSVc0c0dudEJ2V0RHRVFSY0RxbWdLV0tuNTNsbmg5U1Urcmh2UkdhRFJueENuYkNwUEUKTE82V0lKUXVPQm54bzhWcGU0R2JLc2NmSktKSzlZV2ZIOFEvYzBncnE0ZDh5ZmRwUG1uc3hHOEpoTFVuMEhpRFEzQytaMgpzU1RPeU85TDAySUZIdDdIUEY2OWRWR3c3M0pPU1FiL05GK2g5cGRVazBScGNRdGFaTm9TMHg2a3RCQXljK0o0VUpUYTliCkdENWRaSE1KVHBvcWFZUDV0dFlnMjlBQkpUUURMa0tnbWxWRGNtK28zRTN3cTlySWFXMlhpNDQrc3RnTVJVS1J5R041d1EKM2xTWjk1QXBpWFlpRkNONUVrWitUci96TDAraVdwUHRCRzlJZmlGbmlqVlVYUnpEWHZxeGE1QTQ1YUlNWDhad2U5ckxFdAphaVRaOUI5d2tVb0tYdXlDU3plQXhMTGU2aG8wLzBDbmhSR3NoVGg1UDd6aFA4bVExRGZMYlFCRU0zOHJMWlplMExVVVhZCkZpZkFXc3BFRDk2VjBMckhxRkd0Z0dzd1NQcWRBRzBPTDBWekRUbFRucDJVWDY0SEhjUzF2MUMyQnNxbllWbkJNL3p5aUYKQXhabDB4cGRPUVVuKzV2V2VHUXZsQkhGeU0vQmtXRVhMbjc1YVNQL3JwcnlZeGdOeWx2M2NiRWNYZXoyWXdLM2UrN1NnZAoxRzFZUVVtNStqNy90Q0x5aFluL1VjRzJhTHJNc3pRY1FoWTE4Sk9IOXF6a2FacWdYckFybnE0dWluT25sbFBKaGJ3ZTVrCmgvMmdyTlVqbEsrRHYxQ2dGZUVDcm9yRHo4L3ZxZW1QNXdVWWF5bFNWWVZ3UHM1bkxDQWUrVlNobFlIOXlNb3JwanNXc3MKYlg0UlAvVGd3TmNtRnBuZ21kTXppNmtIUXhSc2pUT3VxZ3Vsb01FUVZmQ3JkNGxBeWp3eVhRaEcrd2dWMXBuempCZlR4eQpZeFBrc1VGaTg3aEVkZ1RPZ2M5MHlNamVoVGhHOGRMWGEvd0NOU0hLZ1pBbFBZbWdLd2ZvcFlBMjQxdUlxR2J0WUt
qSTFSCnVHU2JqSU80dUVYbkJ5eWVZTnA3Z29iR2NVc1BGV0doY1FPV05QZnl5K1crQ0xhKzVpYkJCZEF2NStVdlZZUHFGMHhTNy8KUm1TbW9BPT0KLS0tLS1FTkQgT1BFTlNTSCBQUklWQVRFIEtFWS0tLS0t").unwrap()).unwrap(); let clone_dir = DirectoryForTests::new_with_random_suffix("/tmp/engine_test_submodule".to_string()); let get_credentials = |user: &str| { @@ -333,7 +336,7 @@ mod tests { ] }; let repo = clone_at_commit( - "https://github.com/Qovery/engine-testing.git", + &Url::parse("https://github.com/Qovery/engine-testing.git").unwrap(), "9a9c1f4373c8128151a9def9ea3d838fa2ed33e8", clone_dir.path(), &get_credentials, diff --git a/src/models.rs b/src/io_models.rs similarity index 64% rename from src/models.rs rename to src/io_models.rs index fb3d812b..e9410a96 100644 --- a/src/models.rs +++ b/src/io_models.rs @@ -2,53 +2,75 @@ use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; use std::hash::Hash; use std::net::Ipv4Addr; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use chrono::{DateTime, Utc}; -use git2::{Cred, CredentialType, Error}; use itertools::Itertools; use rand::distributions::Alphanumeric; use rand::Rng; use serde::{Deserialize, Serialize}; +use url::Url; -use crate::build_platform::{Build, BuildOptions, Credentials, GitRepository, Image, SshKey}; -use crate::cloud_provider::aws::databases::mongodb::MongoDB; -use crate::cloud_provider::aws::databases::mysql::MySQL; -use crate::cloud_provider::aws::databases::postgresql::PostgreSQL; -use crate::cloud_provider::aws::databases::redis::Redis; -use crate::cloud_provider::service::{DatabaseOptions, StatefulService, StatelessService}; +use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; +use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; +use crate::cloud_provider::aws::databases::mysql::MySQLAws; +use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; +use crate::cloud_provider::aws::databases::redis::RedisAws; +use 
crate::cloud_provider::aws::router::RouterAws; +use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; +use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; +use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; +use crate::cloud_provider::digitalocean::databases::redis::RedisDo; +use crate::cloud_provider::digitalocean::router::RouterDo; +use crate::cloud_provider::environment::Environment; +use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; +use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; +use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; +use crate::cloud_provider::scaleway::databases::redis::RedisScw; +use crate::cloud_provider::scaleway::router::RouterScw; +use crate::cloud_provider::service::DatabaseOptions; use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; -use crate::git; +use crate::cmd::docker::Docker; +use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; -use crate::utilities::get_image_tag; +use crate::models; +use crate::models::application::{ApplicationError, IApplication}; +use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; +use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; +use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; +use crate::models::types::{AWS, DO, SCW}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { - raw: String, + raw_long_id: String, short: String, } impl QoveryIdentifier { - pub fn new(raw: String) -> Self { + pub fn new(raw_long_id: String, raw_short_id: String) -> Self { QoveryIdentifier { - raw: raw.to_string(), - short: QoveryIdentifier::extract_short(raw.as_str()), + raw_long_id, + short: raw_short_id, } } + pub fn new_from_long_id(raw_long_id: String) -> Self { + QoveryIdentifier::new(raw_long_id.to_string(), 
QoveryIdentifier::extract_short(raw_long_id.as_str())) + } + pub fn new_random() -> Self { - Self::new(uuid::Uuid::new_v4().to_string()) + Self::new_from_long_id(uuid::Uuid::new_v4().to_string()) } fn extract_short(raw: &str) -> String { - let max_execution_id_chars: usize = 7; - match raw.char_indices().nth(max_execution_id_chars) { + let max_execution_id_chars: usize = 8; + match raw.char_indices().nth(max_execution_id_chars - 1) { None => raw.to_string(), - Some((idx, _)) => raw[..idx].to_string(), + Some((_, _)) => raw[..max_execution_id_chars].to_string(), } } @@ -59,26 +81,18 @@ impl QoveryIdentifier { impl From for QoveryIdentifier { fn from(s: String) -> Self { - QoveryIdentifier::new(s) + QoveryIdentifier::new_from_long_id(s) } } impl Display for QoveryIdentifier { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(self.raw.as_str()) + f.write_str(self.raw_long_id.as_str()) } } #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub enum EnvironmentAction { - Environment(TargetEnvironment), -} - -pub type TargetEnvironment = Environment; -pub type FailoverEnvironment = Environment; - -#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct Environment { +pub struct EnvironmentRequest { pub execution_id: String, pub id: String, pub owner_id: String, @@ -91,57 +105,47 @@ pub struct Environment { pub clone_from_environment_id: Option, } -impl Environment { - pub fn to_qe_environment( +impl EnvironmentRequest { + pub fn to_environment_domain( &self, context: &Context, - built_applications: &Vec>, cloud_provider: &dyn CloudProvider, + container_registry: &ContainerRegistryInfo, logger: Box, - ) -> crate::cloud_provider::environment::Environment { - let applications = self - .applications - .iter() - .map(|x| match built_applications.iter().find(|y| x.id.as_str() == y.id()) { - Some(app) => x.to_stateless_service(context, app.image().clone(), cloud_provider, logger.clone()), - _ => 
x.to_stateless_service(context, x.to_image(), cloud_provider, logger.clone()), - }) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) - .collect::>(); + ) -> Result { + let mut applications = Vec::with_capacity(self.applications.len()); + for app in &self.applications { + match app.to_application_domain(context, app.to_build(container_registry), cloud_provider, logger.clone()) { + Ok(app) => applications.push(app), + Err(err) => { + return Err(err); + } + } + } + //FIXME: remove those flatten as it hide errors regarding conversion to model data type let routers = self .routers .iter() - .map(|x| x.to_stateless_service(context, cloud_provider, logger.clone())) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) + .filter_map(|x| x.to_router_domain(context, cloud_provider, logger.clone())) .collect::>(); - // orders is important, first external services, then applications and then routers. - let mut stateless_services = applications; - // routers are deployed lastly to avoid to be blacklisted if we request TLS certificates - // while an app does not start for some reason. 
- stateless_services.extend(routers); - let databases = self .databases .iter() - .map(|x| x.to_stateful_service(context, cloud_provider, logger.clone())) - .filter(|x| x.is_some()) - .map(|x| x.unwrap()) + .filter_map(|x| x.to_database_domain(context, cloud_provider, logger.clone())) .collect::>(); - let stateful_services = databases; - - crate::cloud_provider::environment::Environment::new( + Ok(Environment::new( self.id.as_str(), self.project_id.as_str(), self.owner_id.as_str(), self.organization_id.as_str(), - stateless_services, - stateful_services, - ) + self.action.to_service_action(), + applications, + routers, + databases, + )) } } @@ -214,18 +218,18 @@ pub struct Application { } impl Application { - pub fn to_application<'a>( + pub fn to_application_domain( &self, context: &Context, - image: &Image, + build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(crate::cloud_provider::aws::application::Application::new( + CPKind::Aws => Ok(Box::new(models::application::Application::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -237,173 +241,68 @@ impl Application { self.min_instances, self.max_instances, self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - listeners, - logger, - ))), - CPKind::Do => Some(Box::new( - crate::cloud_provider::digitalocean::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| 
s.to_do_storage()).collect::>(), - environment_variables, - listeners, - logger, - ), - )), - CPKind::Scw => Some(Box::new( - crate::cloud_provider::scaleway::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image.clone(), - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger, - ), - )), - } - } - - pub fn to_stateless_service( - &self, - context: &Context, - image: Image, - cloud_provider: &dyn CloudProvider, - logger: Box, - ) -> Option> { - let environment_variables = to_environment_variable(&self.environment_vars); - let listeners = cloud_provider.listeners().clone(); - - match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(crate::cloud_provider::aws::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, + build, self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), environment_variables, + AwsAppExtraSettings {}, listeners, logger.clone(), - ))), - CPKind::Do => Some(Box::new( - crate::cloud_provider::digitalocean::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, - self.storage.iter().map(|s| s.to_do_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ), - )), - CPKind::Scw => Some(Box::new( - 
crate::cloud_provider::scaleway::application::Application::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - image, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ), - )), + )?)), + CPKind::Do => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + DoAppExtraSettings {}, + listeners, + logger.clone(), + )?)), + CPKind::Scw => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + ScwAppExtraSettings {}, + listeners, + logger.clone(), + )?)), } } - pub fn to_image(&self) -> Image { - self.to_image_with_commit(&self.commit_id) - } - - pub fn to_image_from_parent_commit

(&self, clone_repo_into_dir: P) -> Result, Error> - where - P: AsRef, - { - let parent_commit_id = git::get_parent_commit_id( - self.git_url.as_str(), - self.commit_id.as_str(), - clone_repo_into_dir, - &|_| match &self.git_credentials { - None => vec![], - Some(creds) => vec![( - CredentialType::USER_PASS_PLAINTEXT, - Cred::userpass_plaintext(creds.login.as_str(), creds.access_token.as_str()).unwrap(), - )], - }, - )?; - - Ok(match parent_commit_id { - Some(id) => Some(self.to_image_with_commit(&id)), - None => None, - }) - } - - pub fn to_image_with_commit(&self, commit_id: &String) -> Image { + fn to_image(&self, cr_info: &ContainerRegistryInfo) -> Image { Image { application_id: self.id.clone(), - name: self.name.clone(), - tag: get_image_tag( - &self.root_path, - &self.dockerfile_path, - &self.environment_vars, - commit_id, - ), + name: (cr_info.get_image_name)(&self.name), + tag: "".to_string(), // It needs to be compute after creation commit_id: self.commit_id.clone(), - registry_name: None, - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, + registry_name: cr_info.registry_name.clone(), + registry_url: cr_info.endpoint.clone(), + registry_docker_json_config: cr_info.registry_docker_json_config.clone(), + repository_name: (cr_info.get_repository_name)(&self.name), } } - pub fn to_build(&self) -> Build { + pub fn to_build(&self, registry_url: &ContainerRegistryInfo) -> Build { // Retrieve ssh keys from env variables const ENV_GIT_PREFIX: &str = "GIT_SSH_KEY"; let env_ssh_keys: Vec<(String, String)> = self @@ -431,18 +330,14 @@ impl Application { let passphrase = self .environment_vars .get(&ssh_key_name.replace(ENV_GIT_PREFIX, "GIT_SSH_PASSPHRASE")) - .map(|val| base64::decode(val).ok()) - .flatten() - .map(|str| String::from_utf8(str).ok()) - .flatten(); + .and_then(|val| base64::decode(val).ok()) + .and_then(|str| String::from_utf8(str).ok()); let public_key = self .environment_vars 
.get(&ssh_key_name.replace(ENV_GIT_PREFIX, "GIT_SSH_PUBLIC_KEY")) - .map(|val| base64::decode(val).ok()) - .flatten() - .map(|str| String::from_utf8(str).ok()) - .flatten(); + .and_then(|val| base64::decode(val).ok()) + .and_then(|str| String::from_utf8(str).ok()); ssh_keys.push(SshKey { private_key, @@ -451,31 +346,59 @@ impl Application { }); } - Build { + // Convert our root path to an relative path to be able to append them correctly + let root_path = if Path::new(&self.root_path).is_absolute() { + PathBuf::from(self.root_path.trim_start_matches('/')) + } else { + PathBuf::from(&self.root_path) + }; + assert!(root_path.is_relative(), "root path is not a relative path"); + + let dockerfile_path = self.dockerfile_path.as_ref().map(|path| { + if Path::new(&path).is_absolute() { + root_path.join(path.trim_start_matches('/')) + } else { + root_path.join(&path) + } + }); + + //FIXME: Return a result the function + let url = Url::parse(&self.git_url).unwrap_or_else(|_| Url::parse("https://invalid-git-url.com").unwrap()); + + let mut disable_build_cache = false; + let mut build = Build { git_repository: GitRepository { - url: self.git_url.clone(), + url, credentials: self.git_credentials.as_ref().map(|credentials| Credentials { login: credentials.login.clone(), password: credentials.access_token.clone(), }), ssh_keys, commit_id: self.commit_id.clone(), - dockerfile_path: self.dockerfile_path.clone(), - root_path: self.root_path.clone(), + dockerfile_path, + root_path, buildpack_language: self.buildpack_language.clone(), }, - image: self.to_image(), - options: BuildOptions { - environment_variables: self - .environment_vars - .iter() - .map(|(k, v)| crate::build_platform::EnvironmentVariable { - key: k.clone(), - value: String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or(vec![])).into_owned(), - }) - .collect::>(), - }, - } + image: self.to_image(registry_url), + environment_variables: self + .environment_vars + .iter() + .filter_map(|(k, v)| { + // Remove 
special vars + let v = String::from_utf8_lossy(&base64::decode(v.as_bytes()).unwrap_or_default()).into_owned(); + if k == "QOVERY_DISABLE_BUILD_CACHE" && v.to_lowercase() == "true" { + disable_build_cache = true; + return None; + } + + Some((k.clone(), v)) + }) + .collect::>(), + disable_cache: disable_build_cache, + }; + + build.compute_image_tag(); + build } } @@ -524,17 +447,15 @@ pub enum StorageType { } impl Storage { - pub fn to_aws_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_aws_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), storage_type: match self.storage_type { - StorageType::SlowHdd => crate::cloud_provider::aws::application::StorageType::SC1, - StorageType::Hdd => crate::cloud_provider::aws::application::StorageType::ST1, - StorageType::Ssd => crate::cloud_provider::aws::application::StorageType::GP2, - StorageType::FastSsd => crate::cloud_provider::aws::application::StorageType::IO1, + StorageType::SlowHdd => AwsStorageType::SC1, + StorageType::Hdd => AwsStorageType::ST1, + StorageType::Ssd => AwsStorageType::GP2, + StorageType::FastSsd => AwsStorageType::IO1, }, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), @@ -542,26 +463,22 @@ impl Storage { } } - pub fn to_do_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_do_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::digitalocean::application::StorageType::Standard, + storage_type: DoStorageType::Standard, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: self.snapshot_retention_in_days, } } - pub fn to_scw_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_scw_storage(&self) -> crate::cloud_provider::models::Storage 
{ crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::scaleway::application::StorageType::BlockSsd, + storage_type: ScwStorageType::BlockSsd, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: self.snapshot_retention_in_days, @@ -585,12 +502,12 @@ pub struct Router { } impl Router { - pub fn to_stateless_service( + pub fn to_router_domain( &self, context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let custom_domains = self .custom_domains .iter() @@ -613,7 +530,7 @@ impl Router { match cloud_provider.kind() { CPKind::Aws => { - let router: Box = Box::new(crate::cloud_provider::aws::router::Router::new( + let router = Box::new(RouterAws::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -628,23 +545,22 @@ impl Router { Some(router) } CPKind::Do => { - let router: Box = - Box::new(crate::cloud_provider::digitalocean::router::Router::new( - context.clone(), - self.id.as_str(), - self.name.as_str(), - self.action.to_service_action(), - self.default_domain.as_str(), - custom_domains, - routes, - self.sticky_sessions_enabled, - listeners, - logger, - )); + let router = Box::new(RouterDo::new( + context.clone(), + self.id.as_str(), + self.name.as_str(), + self.action.to_service_action(), + self.default_domain.as_str(), + custom_domains, + routes, + self.sticky_sessions_enabled, + listeners, + logger, + )); Some(router) } CPKind::Scw => { - let router: Box = Box::new(crate::cloud_provider::scaleway::router::Router::new( + let router = Box::new(RouterScw::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -707,12 +623,12 @@ pub struct Database { } impl Database { - pub fn to_stateful_service( + pub fn to_database_domain( &self, context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let database_options = DatabaseOptions { mode: 
self.mode.clone(), login: self.username.clone(), @@ -732,7 +648,7 @@ impl Database { match cloud_provider.kind() { CPKind::Aws => match self.kind { DatabaseKind::Postgresql => { - let db: Box = Box::new(PostgreSQL::new( + let db = Box::new(PostgreSQLAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -751,7 +667,7 @@ impl Database { Some(db) } DatabaseKind::Mysql => { - let db: Box = Box::new(MySQL::new( + let db = Box::new(MySQLAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -770,7 +686,7 @@ impl Database { Some(db) } DatabaseKind::Mongodb => { - let db: Box = Box::new(MongoDB::new( + let db = Box::new(MongoDbAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -789,7 +705,7 @@ impl Database { Some(db) } DatabaseKind::Redis => { - let db: Box = Box::new(Redis::new( + let db = Box::new(RedisAws::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -810,83 +726,78 @@ impl Database { }, CPKind::Do => match self.kind { DatabaseKind::Postgresql => { - let db: Box = Box::new( - crate::cloud_provider::digitalocean::databases::postgresql::PostgreSQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - ), - ); + let db = Box::new(PostgresDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Mysql => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::mysql::MySQL::new( - context.clone(), - 
self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(MySQLDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Redis => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::redis::Redis::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(RedisDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } DatabaseKind::Mongodb => { - let db: Box = - Box::new(crate::cloud_provider::digitalocean::databases::mongodb::MongoDB::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + let db = Box::new(MongoDo::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + 
self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); Some(db) } @@ -894,70 +805,12 @@ impl Database { CPKind::Scw => match self.kind { DatabaseKind::Postgresql => match VersionsNumber::from_str(self.version.as_str()) { Ok(v) => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::postgresql::PostgreSQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); - - Some(db) - } - Err(e) => { - error!( - "{}", - format!("error while parsing postgres version, error: {}", e.message()) - ); - None - } - }, - DatabaseKind::Mysql => match VersionsNumber::from_str(self.version.as_str()) { - Ok(v) => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::mysql::MySQL::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); - - Some(db) - } - Err(e) => { - error!( - "{}", - format!("error while parsing mysql version, error: {}", e.message()) - ); - None - } - }, - DatabaseKind::Redis => { - let db: Box = - Box::new(crate::cloud_provider::scaleway::databases::redis::Redis::new( + let db = Box::new(PostgresScw::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - self.version.as_str(), + v, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -968,16 +821,21 @@ impl Database { logger.clone(), )); - Some(db) - } - DatabaseKind::Mongodb => { - let 
db: Box = - Box::new(crate::cloud_provider::scaleway::databases::mongodb::MongoDB::new( + Some(db) + } + Err(e) => { + error!("{}", format!("error while parsing postgres version, error: {}", e.message())); + None + } + }, + DatabaseKind::Mysql => match VersionsNumber::from_str(self.version.as_str()) { + Ok(v) => { + let db = Box::new(MySQLScw::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - self.version.as_str(), + v, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -985,9 +843,52 @@ impl Database { self.database_instance_type.as_str(), database_options, listeners, - logger, + logger.clone(), )); + Some(db) + } + Err(e) => { + error!("{}", format!("error while parsing mysql version, error: {}", e.message())); + None + } + }, + DatabaseKind::Redis => { + let db = Box::new(RedisScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger.clone(), + )); + + Some(db) + } + DatabaseKind::Mongodb => { + let db = Box::new(MongoDbScw::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.version.as_str(), + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options, + listeners, + logger, + )); + Some(db) } }, @@ -1078,6 +979,21 @@ pub trait ProgressListener: Send + Sync { fn delete_error(&self, info: ProgressInfo); } +pub struct NoOpProgressListener {} + +impl ProgressListener for NoOpProgressListener { + fn deployment_in_progress(&self, _info: ProgressInfo) {} + fn pause_in_progress(&self, _info: ProgressInfo) {} + fn delete_in_progress(&self, _info: ProgressInfo) {} + fn error(&self, _info: ProgressInfo) {} 
+ fn deployed(&self, _info: ProgressInfo) {} + fn paused(&self, _info: ProgressInfo) {} + fn deleted(&self, _info: ProgressInfo) {} + fn deployment_error(&self, _info: ProgressInfo) {} + fn pause_error(&self, _info: ProgressInfo) {} + fn delete_error(&self, _info: ProgressInfo) {} +} + pub trait Listen { fn listeners(&self) -> &Listeners; fn add_listener(&mut self, listener: Listener); @@ -1144,7 +1060,7 @@ impl<'a> ListenersHelper<'a> { } } -#[derive(PartialEq, Eq, Hash, Clone)] +#[derive(Clone)] pub struct Context { organization_id: String, cluster_id: String, @@ -1152,9 +1068,10 @@ pub struct Context { workspace_root_dir: String, lib_root_dir: String, test_cluster: bool, - docker_host: Option, + docker_host: Option, features: Vec, metadata: Option, + pub docker: Docker, } #[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)] @@ -1165,13 +1082,13 @@ pub enum Features { // trait used to reimplement clone without same fields // this trait is used for Context struct -pub trait Clone2 { +pub trait CloneForTest { fn clone_not_same_execution_id(&self) -> Self; } // for test we need to clone context but to change the directory workspace used // to to this we just have to suffix the execution id in tests -impl Clone2 for Context { +impl CloneForTest for Context { fn clone_not_same_execution_id(&self) -> Context { let mut new = self.clone(); let suffix = rand::thread_rng() @@ -1192,9 +1109,10 @@ impl Context { workspace_root_dir: String, lib_root_dir: String, test_cluster: bool, - docker_host: Option, + docker_host: Option, features: Vec, metadata: Option, + docker: Docker, ) -> Self { Context { organization_id, @@ -1206,6 +1124,7 @@ impl Context { docker_host, features, metadata, + docker, } } @@ -1229,8 +1148,8 @@ impl Context { self.lib_root_dir.as_str() } - pub fn docker_tcp_socket(&self) -> Option<&String> { - self.docker_host.as_ref() + pub fn docker_tcp_socket(&self) -> &Option { + &self.docker_host } pub fn metadata(&self) -> Option<&Metadata> { @@ 
-1264,17 +1183,7 @@ impl Context { pub fn resource_expiration_in_seconds(&self) -> Option { match &self.metadata { - Some(meta) => meta.resource_expiration_in_seconds.map(|ttl| ttl), - _ => None, - } - } - - pub fn docker_build_options(&self) -> Option> { - match &self.metadata { - Some(meta) => meta - .docker_build_options - .clone() - .map(|b| b.split(' ').map(|x| x.to_string()).collect()), + Some(meta) => meta.resource_expiration_in_seconds, _ => None, } } @@ -1296,7 +1205,6 @@ impl Context { pub struct Metadata { pub dry_run_deploy: Option, pub resource_expiration_in_seconds: Option, - pub docker_build_options: Option, pub forced_upgrade: Option, pub disable_pleco: Option, } @@ -1305,14 +1213,12 @@ impl Metadata { pub fn new( dry_run_deploy: Option, resource_expiration_in_seconds: Option, - docker_build_options: Option, forced_upgrade: Option, disable_pleco: Option, ) -> Self { Metadata { dry_run_deploy, resource_expiration_in_seconds, - docker_build_options, forced_upgrade, disable_pleco, } @@ -1380,13 +1286,13 @@ impl Domain { } fn is_wildcarded(&self) -> bool { - self.raw.starts_with("*") + self.raw.starts_with('*') } } impl Display for Domain { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(&self.raw.as_str()) + f.write_str(self.raw.as_str()) } } @@ -1404,13 +1310,13 @@ impl ToHelmString for Domain { impl ToTerraformString for Ipv4Addr { fn to_terraform_format_string(&self) -> String { - format!("{{{}}}", self.to_string()) + format!("{{{}}}", self) } } #[cfg(test)] mod tests { - use crate::models::Domain; + use crate::io_models::{Domain, QoveryIdentifier}; #[test] fn test_domain_new() { @@ -1488,4 +1394,65 @@ mod tests { ); } } + + #[test] + fn test_qovery_identifier_new_from_long_id() { + struct TestCase<'a> { + input: String, + expected_long_id_output: String, + expected_short_output: String, + description: &'a str, + } + + // setup: + let test_cases: Vec = vec![ + TestCase { + input: "".to_string(), + expected_long_id_output: 
"".to_string(), + expected_short_output: "".to_string(), + description: "empty raw long ID input", + }, + TestCase { + input: "2a365285-992f-4285-ab96-c55ac81ecde9".to_string(), + expected_long_id_output: "2a365285-992f-4285-ab96-c55ac81ecde9".to_string(), + expected_short_output: "2a365285".to_string(), + description: "proper Uuid input", + }, + TestCase { + input: "2a365285".to_string(), + expected_long_id_output: "2a365285".to_string(), + expected_short_output: "2a365285".to_string(), + description: "non standard Uuid input, length 8", + }, + TestCase { + input: "2a365285hebnrfvuebr".to_string(), + expected_long_id_output: "2a365285hebnrfvuebr".to_string(), + expected_short_output: "2a365285".to_string(), + description: "non standard Uuid input, length longer than expected short (length 8)", + }, + TestCase { + input: "2a365".to_string(), + expected_long_id_output: "2a365".to_string(), + expected_short_output: "2a365".to_string(), + description: "non standard Uuid input, length shorter than expected short (length 8)", + }, + ]; + + for tc in test_cases { + // execute: + let result = QoveryIdentifier::new_from_long_id(tc.input.clone()); + + // verify: + assert_eq!( + tc.expected_long_id_output, result.raw_long_id, + "case {} : '{}'", + tc.description, tc.input + ); + assert_eq!( + tc.expected_short_output, result.short, + "case {} : '{}'", + tc.description, tc.input + ); + } + } } diff --git a/src/lib.rs b/src/lib.rs index 2d0cb7b2..00177df1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,6 +19,7 @@ pub mod errors; pub mod events; pub mod fs; pub mod git; +pub mod io_models; pub mod logger; pub mod models; pub mod object_storage; diff --git a/src/logger.rs b/src/logger.rs index f5a1a2bb..4497f4b9 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,16 +1,8 @@ use crate::events::{EngineEvent, EventMessageVerbosity}; use tracing; -#[derive(Debug, Clone)] -pub enum LogLevel { - Debug, - Info, - Warning, - Error, -} - pub trait Logger: Send + Sync { - fn log(&self, 
log_level: LogLevel, event: EngineEvent); + fn log(&self, event: EngineEvent); fn clone_dyn(&self) -> Box; } @@ -37,7 +29,7 @@ impl Default for StdIoLogger { } impl Logger for StdIoLogger { - fn log(&self, log_level: LogLevel, event: EngineEvent) { + fn log(&self, event: EngineEvent) { let event_details = event.get_details(); let stage = event_details.stage(); let execution_id = event_details.execution_id().to_string(); @@ -63,11 +55,11 @@ impl Logger for StdIoLogger { transmitter = event_details.transmitter().to_string().as_str(), ) .in_scope(|| { - match log_level { - LogLevel::Debug => debug!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Info => info!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Warning => warn!("{}", event.message(EventMessageVerbosity::FullDetails)), - LogLevel::Error => error!("{}", event.message(EventMessageVerbosity::FullDetails)), + match event { + EngineEvent::Debug(_, _) => debug!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Info(_, _) => info!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Warning(_, _) => warn!("{}", event.message(EventMessageVerbosity::FullDetails)), + EngineEvent::Error(_, _) => error!("{}", event.message(EventMessageVerbosity::FullDetails)), }; }); } @@ -80,18 +72,17 @@ impl Logger for StdIoLogger { #[cfg(test)] mod tests { use super::*; - use crate::cloud_provider::scaleway::application::ScwRegion; use crate::cloud_provider::Kind; use crate::errors; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; - use crate::models::QoveryIdentifier; + use crate::io_models::QoveryIdentifier; + use crate::models::scaleway::ScwRegion; use tracing_test::traced_test; use url::Url; use uuid::Uuid; struct TestCase<'a> { - log_level: LogLevel, event: EngineEvent, description: &'a str, } @@ -100,11 +91,11 @@ mod tests { #[test] fn test_log() { // 
setup: - let orga_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); - let cluster_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); + let orga_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); + let cluster_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let cluster_name = format!("qovery-{}", cluster_id); - let execution_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); - let app_id = QoveryIdentifier::new(Uuid::new_v4().to_string()); + let execution_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); + let app_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let app_name = format!("simple-app-{}", app_id); let qovery_message = "Qovery message"; let user_message = "User message"; @@ -115,7 +106,6 @@ mod tests { let test_cases = vec![ TestCase { - log_level: LogLevel::Error, event: EngineEvent::Error( EngineError::new_unknown( EventDetails::new( @@ -133,7 +123,7 @@ mod tests { safe_message.to_string(), Some(raw_message.to_string()), )), - Some(link.clone()), + Some(link), Some(hint.to_string()), ), None, @@ -141,8 +131,7 @@ mod tests { description: "Error event", }, TestCase { - log_level: LogLevel::Info, - event: EngineEvent::Deploying( + event: EngineEvent::Info( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -150,15 +139,14 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Infrastructure(InfrastructureStep::Create), - Transmitter::Kubernetes(cluster_id.to_string(), cluster_name.to_string()), + Transmitter::Kubernetes(cluster_id.to_string(), cluster_name), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), description: "Deploying info event", }, TestCase { - log_level: LogLevel::Debug, - event: EngineEvent::Pausing( + event: EngineEvent::Debug( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -173,8 +161,7 @@ mod tests { description: "Pausing application debug event", }, TestCase { - 
log_level: LogLevel::Warning, - event: EngineEvent::Pausing( + event: EngineEvent::Warning( EventDetails::new( Some(Kind::Scw), orga_id.clone(), @@ -182,7 +169,7 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Environment(EnvironmentStep::Delete), - Transmitter::Application(app_id.to_string(), app_name.to_string()), + Transmitter::Application(app_id.to_string(), app_name), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), @@ -194,15 +181,15 @@ mod tests { for tc in test_cases { // execute: - logger.log(tc.log_level.clone(), tc.event.clone()); + logger.log(tc.event.clone()); // validate: assert!( - logs_contain(match tc.log_level { - LogLevel::Debug => "DEBUG", - LogLevel::Info => "INFO", - LogLevel::Warning => "WARN", - LogLevel::Error => "ERROR", + logs_contain(match tc.event { + EngineEvent::Debug(_, _) => "DEBUG", + EngineEvent::Info(_, _) => "INFO", + EngineEvent::Warning(_, _) => "WARN", + EngineEvent::Error(_, _) => "ERROR", }), "{}", tc.description @@ -219,7 +206,7 @@ mod tests { tc.description ); assert!( - logs_contain(format!("execution_id=\"{}\"", execution_id.to_string()).as_str()), + logs_contain(format!("execution_id=\"{}\"", execution_id).as_str()), "{}", tc.description ); @@ -256,17 +243,17 @@ mod tests { ); assert!( - logs_contain(format!("stage=\"{}\"", details.stage().to_string()).as_str()), + logs_contain(format!("stage=\"{}\"", details.stage()).as_str()), "{}", tc.description ); assert!( - logs_contain(format!("step=\"{}\"", details.stage().sub_step_name().to_string()).as_str()), + logs_contain(format!("step=\"{}\"", details.stage().sub_step_name()).as_str()), "{}", tc.description ); assert!( - logs_contain(format!("transmitter=\"{}\"", details.transmitter().to_string()).as_str()), + logs_contain(format!("transmitter=\"{}\"", details.transmitter()).as_str()), "{}", tc.description ); diff --git a/src/models/application.rs b/src/models/application.rs new file mode 100644 
index 00000000..8eba144b --- /dev/null +++ b/src/models/application.rs @@ -0,0 +1,461 @@ +use crate::build_platform::Build; +use crate::cloud_provider::models::{EnvironmentVariable, Storage}; +use crate::cloud_provider::service::{delete_stateless_service, scale_down_application}; +use crate::cloud_provider::service::{ + deploy_stateless_service_error, deploy_user_stateless_service, send_progress_on_long_task, Action, Create, Delete, + Helm, Pause, Service, ServiceType, StatelessService, +}; +use crate::cloud_provider::utilities::{print_action, sanitize_name}; +use crate::cloud_provider::DeploymentTarget; +use crate::cmd::helm::Timeout; +use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; +use crate::logger::Logger; +use crate::models::types::CloudProvider; +use function_name::named; +use std::marker::PhantomData; +use tera::Context as TeraContext; + +#[derive(thiserror::Error, Debug)] +pub enum ApplicationError { + #[error("Application invalid configuration: {0}")] + InvalidConfig(String), +} + +pub struct Application { + _marker: PhantomData, + pub(crate) context: Context, + pub(crate) id: String, + pub(crate) action: Action, + pub(crate) name: String, + pub(crate) ports: Vec, + pub(crate) total_cpus: String, + pub(crate) cpu_burst: String, + pub(crate) total_ram_in_mib: u32, + pub(crate) min_instances: u32, + pub(crate) max_instances: u32, + pub(crate) start_timeout_in_seconds: u32, + pub(crate) build: Build, + pub(crate) storage: Vec>, + pub(crate) environment_variables: Vec, + pub(crate) listeners: Listeners, + pub(crate) logger: Box, + pub(crate) _extra_settings: T::AppExtraSettings, +} + +// Here we define the common behavior among all providers +impl Application { + pub fn new( + context: Context, + id: &str, + action: Action, + name: &str, 
+ ports: Vec, + total_cpus: String, + cpu_burst: String, + total_ram_in_mib: u32, + min_instances: u32, + max_instances: u32, + start_timeout_in_seconds: u32, + build: Build, + storage: Vec>, + environment_variables: Vec, + extra_settings: T::AppExtraSettings, + listeners: Listeners, + logger: Box, + ) -> Result { + // TODO: Check that the information provided are coherent + + Ok(Self { + _marker: PhantomData, + context, + id: id.to_string(), + action, + name: name.to_string(), + ports, + total_cpus, + cpu_burst, + total_ram_in_mib, + min_instances, + max_instances, + start_timeout_in_seconds, + build, + storage, + environment_variables, + listeners, + logger, + _extra_settings: extra_settings, + }) + } + + pub fn is_stateful(&self) -> bool { + !self.storage.is_empty() + } + + pub fn context(&self) -> &Context { + &self.context + } + + pub fn service_type(&self) -> ServiceType { + ServiceType::Application + } + + pub fn id(&self) -> &str { + self.id.as_str() + } + + pub fn name(&self) -> &str { + self.name.as_str() + } + + pub fn commit_id(&self) -> String { + self.build.image.commit_id.clone() + } + + pub fn action(&self) -> &Action { + &self.action + } + + pub fn public_port(&self) -> Option { + self.ports + .iter() + .find(|port| port.publicly_accessible) + .map(|port| port.port as u16) + } + + pub fn start_timeout(&self) -> u32 { + (self.start_timeout_in_seconds + 10) * 4 + } + + pub fn total_cpus(&self) -> String { + self.total_cpus.to_string() + } + + pub fn cpu_burst(&self) -> String { + self.cpu_burst.to_string() + } + + pub fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib + } + + pub fn min_instances(&self) -> u32 { + self.min_instances + } + + pub fn max_instances(&self) -> u32 { + self.max_instances + } + + pub fn publicly_accessible(&self) -> bool { + self.public_port().is_some() + } + + pub fn logger(&self) -> &dyn Logger { + &*self.logger + } + + pub fn selector(&self) -> Option { + Some(format!("appId={}", self.id())) + } + + pub fn 
build(&self) -> &Build { + &self.build + } + + pub fn build_mut(&mut self) -> &mut Build { + &mut self.build + } + + pub fn sanitize_name(&self) -> String { + sanitize_name("app", self.id()) + } + + pub(crate) fn get_event_details(&self, stage: Stage) -> EventDetails { + let context = self.context(); + EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + self.to_transmitter(), + ) + } +} + +// Traits implementations +impl ToTransmitter for Application { + fn to_transmitter(&self) -> Transmitter { + Transmitter::Application(self.id.to_string(), self.name.to_string()) + } +} + +impl Listen for Application { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} + +pub(crate) trait ToTeraContext { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result; +} + +impl Service for Application +where + Application: ToTeraContext, +{ + fn context(&self) -> &Context { + self.context() + } + + fn service_type(&self) -> ServiceType { + self.service_type() + } + + fn id(&self) -> &str { + self.id() + } + + fn name(&self) -> &str { + self.name() + } + + fn sanitized_name(&self) -> String { + self.sanitize_name() + } + + fn version(&self) -> String { + self.commit_id() + } + + fn action(&self) -> &Action { + self.action() + } + + fn private_port(&self) -> Option { + self.public_port() + } + + fn start_timeout(&self) -> Timeout { + Timeout::Value(self.start_timeout()) + } + + fn total_cpus(&self) -> String { + self.total_cpus() + } + + fn cpu_burst(&self) -> String { + self.cpu_burst() + } + + fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib() + } + + fn min_instances(&self) -> u32 { + self.min_instances() + } + + fn max_instances(&self) -> u32 { + self.max_instances() + } + + fn 
publicly_accessible(&self) -> bool { + self.publicly_accessible() + } + + fn tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context(target) + } + + fn logger(&self) -> &dyn Logger { + self.logger() + } + + fn selector(&self) -> Option { + self.selector() + } +} + +impl Helm for Application { + fn helm_selector(&self) -> Option { + self.selector() + } + + fn helm_release_name(&self) -> String { + crate::string::cut(format!("application-{}-{}", self.id(), self.id()), 50) + } + + fn helm_chart_dir(&self) -> String { + format!( + "{}/{}/charts/q-application", + self.context.lib_root_dir(), + T::helm_directory_name(), + ) + } + + fn helm_chart_values_dir(&self) -> String { + String::new() + } + + fn helm_chart_external_name_service_dir(&self) -> String { + String::new() + } +} + +impl Create for Application +where + Application: Service, +{ + #[named] + fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || deploy_user_stateless_service(target, self)) + } + + fn on_create_check(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + send_progress_on_long_task(self, Action::Create, || deploy_stateless_service_error(target, self)) + } +} + +impl Pause for Application +where + Application: Service, +{ + #[named] + fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + send_progress_on_long_task(self, Action::Pause, || { + scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) + }) + } + + fn on_pause_check(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + Ok(()) + } +} + +impl Delete for Application +where + Application: Service, +{ + #[named] + fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + + send_progress_on_long_task(self, Action::Delete, || { + delete_stateless_service(target, self, event_details.clone()) + }) + } + + fn on_delete_check(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + T::short_name(), + "application", + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + + send_progress_on_long_task(self, Action::Delete, || { + delete_stateless_service(target, self, event_details.clone()) + }) + } +} + +impl StatelessService for Application +where + Application: Service, +{ + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} + +pub trait IApplication: StatelessService { + 
fn get_build(&self) -> &Build; + fn get_build_mut(&mut self) -> &mut Build; +} + +impl IApplication for Application +where + Application: Service, +{ + fn get_build(&self) -> &Build { + self.build() + } + + fn get_build_mut(&mut self) -> &mut Build { + self.build_mut() + } +} diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs new file mode 100644 index 00000000..1d21f284 --- /dev/null +++ b/src/models/aws/application.rs @@ -0,0 +1,90 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::aws::AwsStorageType; +use crate::models::types::AWS; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let mut context = default_tera_context(self, target.kubernetes, target.environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build().image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.build().image.registry_host()); + + let cpu_limits = match 
validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + AwsStorageType::SC1 => "sc1", + AwsStorageType::ST1 => "st1", + AwsStorageType::GP2 => "gp2", + AwsStorageType::IO1 => "io1", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/aws/mod.rs b/src/models/aws/mod.rs new file mode 100644 index 00000000..2bbfbc4f --- /dev/null +++ b/src/models/aws/mod.rs @@ -0,0 +1,43 @@ +pub mod application; + +use crate::models::types::CloudProvider; +use crate::models::types::AWS; + +pub struct AwsAppExtraSettings {} +pub struct AwsDbExtraSettings {} +pub struct AwsRouterExtraSettings {} + +impl CloudProvider for AWS { + type AppExtraSettings = AwsAppExtraSettings; + type DbExtraSettings = AwsDbExtraSettings; + type RouterExtraSettings = AwsRouterExtraSettings; + type StorageTypes = AwsStorageType; + + fn short_name() -> &'static str { + "AWS" + } + + fn full_name() 
-> &'static str { + "Amazon Web Service" + } + + fn registry_short_name() -> &'static str { + "ECR" + } + + fn registry_full_name() -> &'static str { + "Elastic Container Registry" + } + + fn helm_directory_name() -> &'static str { + "aws" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum AwsStorageType { + SC1, + ST1, + GP2, + IO1, +} diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs new file mode 100644 index 00000000..5a2e7a61 --- /dev/null +++ b/src/models/digital_ocean/application.rs @@ -0,0 +1,91 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::digital_ocean::DoStorageType; +use crate::models::types::DO; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return 
Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + + // This is specific to digital ocean as it is them that create the registry secret + // we don't have the hand on it + context.insert("registry_secret", "do-container-registry-secret-for-cluster"); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + DoStorageType::Standard => "do-block-storage", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/digital_ocean/mod.rs b/src/models/digital_ocean/mod.rs new file mode 100644 index 00000000..30c11461 --- /dev/null +++ b/src/models/digital_ocean/mod.rs @@ -0,0 +1,126 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::DO; +use std::fmt; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; + +pub struct DoAppExtraSettings {} +pub 
struct DoDbExtraSettings {} +pub struct DoRouterExtraSettings {} + +impl CloudProvider for DO { + type AppExtraSettings = DoAppExtraSettings; + type DbExtraSettings = DoDbExtraSettings; + type RouterExtraSettings = DoRouterExtraSettings; + type StorageTypes = DoStorageType; + + fn short_name() -> &'static str { + "DO" + } + + fn full_name() -> &'static str { + "Digital Ocean" + } + + fn registry_short_name() -> &'static str { + "DO CR" + } + + fn registry_full_name() -> &'static str { + "Digital Ocean Container Registry" + } + + fn helm_directory_name() -> &'static str { + "digitalocean" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum DoStorageType { + Standard, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum DoRegion { + NewYorkCity1, + NewYorkCity2, + NewYorkCity3, + Amsterdam2, + Amsterdam3, + SanFrancisco1, + SanFrancisco2, + SanFrancisco3, + Singapore, + London, + Frankfurt, + Toronto, + Bangalore, +} + +impl DoRegion { + pub fn as_str(&self) -> &str { + match self { + DoRegion::NewYorkCity1 => "nyc1", + DoRegion::NewYorkCity2 => "nyc2", + DoRegion::NewYorkCity3 => "nyc3", + DoRegion::Amsterdam2 => "ams2", + DoRegion::Amsterdam3 => "ams3", + DoRegion::SanFrancisco1 => "sfo1", + DoRegion::SanFrancisco2 => "sfo2", + DoRegion::SanFrancisco3 => "sfo3", + DoRegion::Singapore => "sgp1", + DoRegion::London => "lon1", + DoRegion::Frankfurt => "fra1", + DoRegion::Toronto => "tor1", + DoRegion::Bangalore => "blr1", + } + } +} + +impl Display for DoRegion { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + DoRegion::NewYorkCity1 => write!(f, "nyc1"), + DoRegion::NewYorkCity2 => write!(f, "nyc2"), + DoRegion::NewYorkCity3 => write!(f, "nyc3"), + DoRegion::Amsterdam2 => write!(f, "ams2"), + DoRegion::Amsterdam3 => write!(f, "ams3"), + DoRegion::SanFrancisco1 => write!(f, "sfo1"), + DoRegion::SanFrancisco2 => write!(f, "sfo2"), + DoRegion::SanFrancisco3 => write!(f, "sfo3"), + DoRegion::Singapore => write!(f, "sgp1"), + 
DoRegion::London => write!(f, "lon1"), + DoRegion::Frankfurt => write!(f, "fra1"), + DoRegion::Toronto => write!(f, "tor1"), + DoRegion::Bangalore => write!(f, "blr1"), + } + } +} + +impl FromStr for DoRegion { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "nyc1" => Ok(DoRegion::NewYorkCity1), + "nyc2" => Ok(DoRegion::NewYorkCity2), + "nyc3" => Ok(DoRegion::NewYorkCity3), + "ams2" => Ok(DoRegion::Amsterdam2), + "ams3" => Ok(DoRegion::Amsterdam3), + "sfo1" => Ok(DoRegion::SanFrancisco1), + "sfo2" => Ok(DoRegion::SanFrancisco2), + "sfo3" => Ok(DoRegion::SanFrancisco3), + "sgp1" => Ok(DoRegion::Singapore), + "lon1" => Ok(DoRegion::London), + "fra1" => Ok(DoRegion::Frankfurt), + "tor1" => Ok(DoRegion::Toronto), + "blr1" => Ok(DoRegion::Bangalore), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); + } + } + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 00000000..8db33ab5 --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,5 @@ +pub mod application; +pub mod aws; +pub mod digital_ocean; +pub mod scaleway; +pub mod types; diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs new file mode 100644 index 00000000..2b14300f --- /dev/null +++ b/src/models/scaleway/application.rs @@ -0,0 +1,103 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::scaleway::ScwStorageType; +use crate::models::types::SCW; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, 
target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + // TODO(benjaminch): Switch to proper storage class + // Note: Seems volume storage type are not supported, only blocked storage for the time being + // https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass + ScwStorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", + ScwStorageType::LocalSsd => "l_ssd", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + 
snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + // container registry credentials + context.insert( + "container_registry_docker_json_config", + self.build + .image + .clone() + .registry_docker_json_config + .unwrap_or_default() + .as_str(), + ); + + Ok(context) + } +} diff --git a/src/models/scaleway/mod.rs b/src/models/scaleway/mod.rs new file mode 100644 index 00000000..ab73a293 --- /dev/null +++ b/src/models/scaleway/mod.rs @@ -0,0 +1,207 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::SCW; +use std::fmt; +use std::str::FromStr; + +pub struct ScwAppExtraSettings {} +pub struct ScwDbExtraSettings {} +pub struct ScwRouterExtraSettings {} + +impl CloudProvider for SCW { + type AppExtraSettings = ScwAppExtraSettings; + type DbExtraSettings = ScwDbExtraSettings; + type RouterExtraSettings = ScwRouterExtraSettings; + type StorageTypes = ScwStorageType; + + fn short_name() -> &'static str { + "SCW" + } + + fn full_name() -> &'static str { + "Scaleway" + } + + fn registry_short_name() -> &'static str { + "SCW CR" + } + + fn registry_full_name() -> &'static str { + "Scaleway Container Registry" + } + + fn helm_directory_name() -> &'static str { + "scaleway" + } +} + +#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] +pub enum ScwStorageType { + #[serde(rename = "b_ssd")] + BlockSsd, + #[serde(rename = "l_ssd")] + LocalSsd, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwRegion { + Paris, 
+ Amsterdam, + Warsaw, +} + +impl ScwRegion { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwRegion::Paris => "fr-par", + ScwRegion::Amsterdam => "nl-ams", + ScwRegion::Warsaw => "pl-waw", + } + } +} + +impl fmt::Display for ScwRegion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwRegion::Paris => write!(f, "fr-par"), + ScwRegion::Amsterdam => write!(f, "nl-ams"), + ScwRegion::Warsaw => write!(f, "pl-waw"), + } + } +} + +impl FromStr for ScwRegion { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "fr-par" => Ok(ScwRegion::Paris), + "nl-ams" => Ok(ScwRegion::Amsterdam), + "pl-waw" => Ok(ScwRegion::Warsaw), + _ => Err(()), + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwZone { + Paris1, + Paris2, + Paris3, + Amsterdam1, + Warsaw1, +} + +impl ScwZone { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwZone::Paris1 => "fr-par-1", + ScwZone::Paris2 => "fr-par-2", + ScwZone::Paris3 => "fr-par-3", + ScwZone::Amsterdam1 => "nl-ams-1", + ScwZone::Warsaw1 => "pl-waw-1", + } + } + + pub fn region(&self) -> ScwRegion { + match self { + ScwZone::Paris1 => ScwRegion::Paris, + ScwZone::Paris2 => ScwRegion::Paris, + ScwZone::Paris3 => ScwRegion::Paris, + ScwZone::Amsterdam1 => ScwRegion::Amsterdam, + ScwZone::Warsaw1 => ScwRegion::Warsaw, + } + } + + // TODO(benjaminch): improve / refactor this! 
+ pub fn region_str(&self) -> String { + match self { + ScwZone::Paris1 => "fr-par", + ScwZone::Paris2 => "fr-par", + ScwZone::Paris3 => "fr-par", + ScwZone::Amsterdam1 => "nl-ams", + ScwZone::Warsaw1 => "pl-waw", + } + .to_string() + } +} + +impl fmt::Display for ScwZone { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwZone::Paris1 => write!(f, "fr-par-1"), + ScwZone::Paris2 => write!(f, "fr-par-2"), + ScwZone::Paris3 => write!(f, "fr-par-3"), + ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), + ScwZone::Warsaw1 => write!(f, "pl-waw-1"), + } + } +} + +impl FromStr for ScwZone { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "fr-par-1" => Ok(ScwZone::Paris1), + "fr-par-2" => Ok(ScwZone::Paris2), + "fr-par-3" => Ok(ScwZone::Paris3), + "nl-ams-1" => Ok(ScwZone::Amsterdam1), + "pl-waw-1" => Ok(ScwZone::Warsaw1), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::{ScwRegion, ScwZone}; + use std::str::FromStr; + + #[test] + fn test_region_to_str() { + assert_eq!("fr-par", ScwRegion::Paris.as_str()); + assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); + assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); + } + + #[test] + fn test_region_from_str() { + assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); + assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); + assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); + } + + #[test] + fn test_zone_to_str() { + assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); + assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); + assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); + assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); + assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); + } + + #[test] + fn test_zone_from_str() { + assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); + assert_eq!(ScwZone::from_str("fr-par-2"), 
Ok(ScwZone::Paris2)); + assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); + assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); + assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); + } + + #[test] + fn test_zone_region() { + assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); + assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); + } +} diff --git a/src/models/types.rs b/src/models/types.rs new file mode 100644 index 00000000..75fef0ea --- /dev/null +++ b/src/models/types.rs @@ -0,0 +1,19 @@ +// Those types are just marker types that are use to tag our struct/object model +pub struct AWS {} +pub struct DO {} +pub struct SCW {} + +// CloudProvider trait allows to derive all the custom type we need per provider, +// with our marker type defined above to be able to select the correct one +pub trait CloudProvider { + type AppExtraSettings; + type DbExtraSettings; + type RouterExtraSettings; + type StorageTypes; + + fn short_name() -> &'static str; + fn full_name() -> &'static str; + fn registry_short_name() -> &'static str; + fn registry_full_name() -> &'static str; + fn helm_directory_name() -> &'static str; +} diff --git a/src/object_storage/errors.rs b/src/object_storage/errors.rs new file mode 100644 index 00000000..3a80ec28 --- /dev/null +++ b/src/object_storage/errors.rs @@ -0,0 +1,61 @@ +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum ObjectStorageError { + #[error("Invalid bucket name error for `{bucket_name:?}`: {raw_error_message:?}.")] + InvalidBucketName { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot create bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotCreateBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot delete 
bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotDeleteBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot empty bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotEmptyBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot tag bucket error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotTagBucket { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot get workspace error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotGetWorkspace { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot create file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotCreateFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot open file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotOpenFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot read file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotReadFile { + bucket_name: String, + raw_error_message: String, + }, + #[error("Cannot get object file `{file_name:?}` error in `{bucket_name:?}`: {raw_error_message:?}.")] + CannotGetObjectFile { + bucket_name: String, + file_name: String, + raw_error_message: String, + }, + #[error("Cannot upload file error for `{bucket_name:?}`: {raw_error_message:?}.")] + CannotUploadFile { + bucket_name: String, + raw_error_message: String, + }, +} diff --git a/src/object_storage/mod.rs b/src/object_storage/mod.rs index 329c6ae4..27684d8a 100644 --- a/src/object_storage/mod.rs +++ b/src/object_storage/mod.rs @@ -1,9 +1,10 @@ use serde::{Deserialize, Serialize}; -use crate::error::{EngineError, EngineErrorCause, EngineErrorScope}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; +use crate::object_storage::errors::ObjectStorageError; use std::fs::File; +pub mod errors; pub mod s3; pub mod 
scaleway_object_storage; pub mod spaces; @@ -16,22 +17,16 @@ pub trait ObjectStorage { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn is_valid(&self) -> Result<(), EngineError>; - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError>; - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError>; - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError>; - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError>; - fn engine_error_scope(&self) -> EngineErrorScope { - EngineErrorScope::ObjectStorage(self.id().to_string(), self.name().to_string()) - } - fn engine_error(&self, cause: EngineErrorCause, message: String) -> EngineError { - EngineError::new( - cause, - self.engine_error_scope(), - self.context().execution_id(), - Some(message), - ) - } + fn is_valid(&self) -> Result<(), ObjectStorageError>; + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError>; + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError>; + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError>; + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError>; } #[derive(Serialize, Deserialize, Clone)] diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 68dbf09e..0de654aa 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -14,8 +14,8 @@ use rusoto_s3::{ }; use tokio::io; -use crate::error::{EngineError, EngineErrorCause}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; +use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime::block_on; @@ -45,9 +45,9 @@ impl S3 { context, id, name, - access_key_id: access_key_id.to_string(), - 
secret_access_key: secret_access_key.to_string(), - region: region.clone(), + access_key_id, + secret_access_key, + region, bucket_versioning_activated, bucket_ttl_in_seconds, } @@ -59,7 +59,7 @@ impl S3 { fn get_s3_client(&self) -> S3Client { let region = RusotoRegion::from_str(&self.region.to_aws_format()) - .expect(format!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format()).as_str()); + .unwrap_or_else(|_| panic!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format())); let client = Client::new_with( self.get_credentials(), HttpClient::new().expect("unable to create new Http client"), @@ -68,9 +68,12 @@ impl S3 { S3Client::new_with_client(client, region) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } Ok(()) @@ -90,15 +93,8 @@ impl S3 { .is_ok() } - fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to empty S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -131,14 +127,10 @@ impl S3 { ..Default::default() }), ) { - let message = format!( - "While trying to empty S3 bucket `{}` region `{}`, cannot delete content: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - return 
Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -163,20 +155,13 @@ impl ObjectStorage for S3 { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ObjectStorageError> { // TODO check valid credentials Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to create S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -192,14 +177,10 @@ impl ObjectStorage for S3 { }), ..Default::default() })) { - let message = format!( - "While trying to create S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } let creation_date: DateTime = Utc::now(); @@ -210,53 +191,35 @@ impl ObjectStorage for S3 { tag_set: vec![ Tag { key: "CreationDate".to_string(), - value: format!("{}", creation_date.to_rfc3339()), + value: creation_date.to_rfc3339(), }, Tag { key: "Ttl".to_string(), - value: format!("{}", self.bucket_ttl_in_seconds.unwrap_or_else(|| 0).to_string()), + value: format!("{}", self.bucket_ttl_in_seconds.unwrap_or(0)), }, ], }, ..Default::default() })) { - let message = format!( - "While trying to add tags on S3 bucket, name `{}` region `{}`: {}", - bucket_name, - 
self.region.to_aws_format(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotTagBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } if self.bucket_versioning_activated { - if let Err(e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { + // Not blocking if fails for the ttime being + let _ = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { bucket: bucket_name.to_string(), ..Default::default() - })) { - let message = format!( - "While trying to activate versioning on S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - } + })); } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete S3 bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -270,47 +233,37 @@ impl ObjectStorage for S3 { expected_bucket_owner: None, })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to delete S3 bucket, name `{}` region `{}`: {}", - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { - if let Err(message) = 
S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { + let _ = S3::is_bucket_name_valid(bucket_name)?; let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/s3/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? 
- match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -344,76 +297,50 @@ impl ObjectStorage for S3 { let file = File::open(path).unwrap(); Ok((file_path, file)) } - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotCreateFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - Err(e) => { - let message = format!( - "While trying to get object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { - if let Err(message) = S3::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { + let _ = 
S3::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. {}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), expected_bucket_owner: None, ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.region.to_aws_format(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -424,7 +351,7 @@ mod tests { struct TestCase<'a> { bucket_name_input: &'a str, - expected_output: Result<(), Option>, + expected_output: Result<(), ObjectStorageError>, description: &'a str, } @@ -434,7 +361,10 @@ mod tests { let test_cases: Vec = vec![ TestCase { bucket_name_input: "", - expected_output: Err(Some(String::from("bucket name cannot be empty"))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "".to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }), description: "bucket name is empty", }, TestCase { diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index 07982e9b..1b0e376d 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -2,11 +2,11 @@ 
use chrono::{DateTime, Utc}; use std::fs::File; use std::path::Path; -use crate::cloud_provider::scaleway::application::ScwZone; -use crate::error::{EngineError, EngineErrorCause}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; use crate::object_storage::{Kind, ObjectStorage}; +use crate::models::scaleway::ScwZone; +use crate::object_storage::errors::ObjectStorageError; use crate::runtime::block_on; use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; use rusoto_credential::StaticProvider; @@ -76,36 +76,34 @@ impl ScalewayOS { } fn get_endpoint_url_for_region(&self) -> String { - format!("https://s3.{}.scw.cloud", self.zone.region().to_string()) + format!("https://s3.{}.scw.cloud", self.zone.region()) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } // From Scaleway doc // Note: The SSL certificate does not support bucket names containing additional dots (.). // You may receive a SSL warning in your browser when accessing a bucket like my.bucket.name.s3.fr-par.scw.cloud // and it is recommended to use dashes (-) instead: my-bucket-name.s3.fr-par.scw.cloud. if bucket_name.contains('.') { - return Err(Some( - "bucket name cannot contain '.' in its name, recommended to use '-' instead".to_string(), - )); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot contain '.' 
in its name, recommended to use '-' instead" + .to_string(), + }); } Ok(()) } - fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to empty object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -138,14 +136,10 @@ impl ScalewayOS { ..Default::default() }), ) { - let message = format!( - "While trying to empty object-storage bucket `{}` region `{}`, cannot delete content: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -180,20 +174,13 @@ impl ObjectStorage for ScalewayOS { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { - todo!() + fn is_valid(&self) -> Result<(), ObjectStorageError> { + Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). 
- if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to create object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -211,14 +198,10 @@ impl ObjectStorage for ScalewayOS { }), ..Default::default() })) { - let message = format!( - "While trying to create object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } let creation_date: DateTime = Utc::now(); @@ -234,52 +217,35 @@ impl ObjectStorage for ScalewayOS { }, Tag { key: "Ttl".to_string(), - value: format!("Ttl={}", self.bucket_ttl_in_seconds.unwrap_or_else(|| 0).to_string()), + value: format!("Ttl={}", self.bucket_ttl_in_seconds.unwrap_or(0)), }, ], }, ..Default::default() })) { - let message = format!( - "While trying to add tags on object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotTagBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } if self.bucket_versioning_activated { - if let Err(e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { + if let Err(_e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { bucket: bucket_name.to_string(), ..Default::default() })) { - let message = format!( - "While trying to activate versioning on object-storage bucket, name `{}` region `{}`: 
{}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); // TODO(benjaminch): to be investigated, versioning seems to fail - // Err(self.engine_error(EngineErrorCause::Internal, message)) + // Not blocking if it fails + // Err(self.engine_error(ObjectStorageErrorCause::Internal, message)) } } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn delete_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -297,50 +263,40 @@ impl ObjectStorage for ScalewayOS { ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to delete object-storage bucket, name `{}` region `{}`: {}", - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + Err(e) => Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, BucketDeleteStrategy::Empty => Ok(()), // Do not delete the bucket } } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported 
(https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/scaleway_os/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? - match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -373,76 +329,50 @@ impl ObjectStorage for ScalewayOS { let file = File::open(path).unwrap(); Ok((file_path, file)) } - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, - Err(e) => { - let message = format!("{}", e); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - Err(e) => { - let message = format!( 
- "While trying to get object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `scaleway-api-rs` once object storage will be supported (https://github.com/Qovery/scaleway-api-rs/issues/12). - if let Err(message) = ScalewayOS::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = ScalewayOS::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. 
{}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}` region `{}`, error: {}", - object_key, - bucket_name, - self.zone.region_str(), - e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -453,7 +383,7 @@ mod tests { struct TestCase<'a> { bucket_name_input: &'a str, - expected_output: Result<(), Option>, + expected_output: Result<(), ObjectStorageError>, description: &'a str, } @@ -463,14 +393,19 @@ mod tests { let test_cases: Vec = vec![ TestCase { bucket_name_input: "", - expected_output: Err(Some(String::from("bucket name cannot be empty"))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "".to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }), description: "bucket name is empty", }, TestCase { bucket_name_input: "containing.dot", - expected_output: Err(Some(String::from( - "bucket name cannot contain '.' in its name, recommended to use '-' instead", - ))), + expected_output: Err(ObjectStorageError::InvalidBucketName { + bucket_name: "containing.dot".to_string(), + raw_error_message: "bucket name cannot contain '.' 
in its name, recommended to use '-' instead" + .to_string(), + }), description: "bucket name contains dot char", }, TestCase { diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index 13f1ee92..b337f060 100644 --- a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -11,9 +11,9 @@ use rusoto_s3::{ }; use tokio::io; -use crate::cloud_provider::digitalocean::application::DoRegion; -use crate::error::{EngineError, EngineErrorCause}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; +use crate::models::digital_ocean::DoRegion; +use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime; use crate::runtime::block_on; @@ -74,29 +74,27 @@ impl Spaces { S3Client::new_with_client(client, region) } - fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option> { + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), ObjectStorageError> { if bucket_name.is_empty() { - return Err(Some("bucket name cannot be empty".to_string())); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot be empty".to_string(), + }); } if bucket_name.contains('.') { - return Err(Some( - "bucket name cannot contain '.' in its name, recommended to use '-' instead".to_string(), - )); + return Err(ObjectStorageError::InvalidBucketName { + bucket_name: bucket_name.to_string(), + raw_error_message: "bucket name cannot contain '.' 
in its name, recommended to use '-' instead" + .to_string(), + }); } Ok(()) } - pub fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to delete object-storage bucket, name `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + pub fn empty_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -129,12 +127,10 @@ impl Spaces { ..Default::default() }), ) { - let message = format!( - "While trying to delete object-storage bucket `{}`, cannot delete content: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotEmptyBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } } @@ -156,7 +152,7 @@ impl Spaces { bucket_name: T, object_key: S, download_into_file_path: X, - ) -> Result + ) -> Result where T: Into, S: Into, @@ -171,10 +167,12 @@ impl Spaces { let client = Client::new_with(credentials, HttpClient::new().unwrap()); let s3_client = S3Client::new_with_client(client, region.clone()); + let bucket_name: String = bucket_name.into(); + let object_key: String = object_key.into(); let object = s3_client .get_object(GetObjectRequest { - bucket: bucket_name.into(), - key: object_key.into(), + bucket: bucket_name.to_string(), + key: object_key.to_string(), ..Default::default() }) .await; @@ -195,12 +193,22 @@ impl Spaces { match file { Ok(mut created_file) => match io::copy(&mut body, &mut created_file).await { Ok(_) => Ok(File::open(download_into_file_path.as_ref()).unwrap()), - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => 
Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), }, - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } - Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))), + Err(e) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: e.to_string(), + }), } } } @@ -222,20 +230,13 @@ impl ObjectStorage for Spaces { self.name.as_str() } - fn is_valid(&self) -> Result<(), EngineError> { + fn is_valid(&self) -> Result<(), ObjectStorageError> { // TODO check valid credentials Ok(()) } - fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "error while trying to create object-storage bucket `{}` is invalid: {}", - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return Err(self.engine_error(EngineErrorCause::Internal, message)); - } + fn create_bucket(&self, bucket_name: &str) -> Result<(), ObjectStorageError> { + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); @@ -250,18 +251,16 @@ impl ObjectStorage for Spaces { bucket: bucket_name.to_string(), ..Default::default() })) { - let message = format!( - "error while trying to create object-storage bucket `{}`: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotCreateBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } Ok(()) } - fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + fn delete_bucket(&self, bucket_name: &str) -> 
Result<(), ObjectStorageError> { let s3_client = self.get_s3_client(); // make sure to delete all bucket content before trying to delete the bucket @@ -279,36 +278,38 @@ impl ObjectStorage for Spaces { })) { Ok(_) => Ok(()), Err(e) => { - let message = format!( - "While trying to delete object-storage bucket, name `{}`: {}", - bucket_name, e - ); - error!("{}", message); - return Err(self.engine_error(EngineErrorCause::Internal, message)); + return Err(ObjectStorageError::CannotDeleteBucket { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }); } }, BucketDeleteStrategy::Empty => Ok(()), // Do not delete the bucket }; } - fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { + fn get( + &self, + bucket_name: &str, + object_key: &str, + use_cache: bool, + ) -> Result<(StringPath, File), ObjectStorageError> { let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), format!("object-storage/spaces/{}", self.name()), ) - .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; + .map_err(|err| ObjectStorageError::CannotGetWorkspace { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + })?; let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { // does config file already exists? 
- match File::open(file_path.as_str()) { - Ok(file) => { - debug!("{} cache hit", file_path.as_str()); - return Ok((file_path, file)); - } - Err(_) => debug!("{} cache miss", file_path.as_str()), + if let Ok(file) = File::open(file_path.as_str()) { + return Ok((file_path, file)); } } @@ -316,13 +317,7 @@ impl ObjectStorage for Spaces { let result = retry::retry(Fibonacci::from_millis(3000).take(5), || { match runtime::block_on(self.get_object(bucket_name, object_key, file_path.as_str())) { Ok(file) => OperationResult::Ok(file), - Err(err) => { - debug!("{:?}", err); - - warn!("Can't download object '{}/{}'. Let's retry...", bucket_name, object_key); - - OperationResult::Retry(err) - } + Err(err) => OperationResult::Retry(err), } }); @@ -331,57 +326,49 @@ impl ObjectStorage for Spaces { Err(err) => { return match err { Error::Operation { error, .. } => Err(error), - Error::Internal(err) => Err(self.engine_error(EngineErrorCause::Internal, err)), + Error::Internal(err) => Err(ObjectStorageError::CannotGetObjectFile { + bucket_name: bucket_name.to_string(), + file_name: object_key.to_string(), + raw_error_message: err, + }), }; } }; match file { Ok(file) => Ok((file_path, file)), - Err(err) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", err))), + Err(err) => Err(ObjectStorageError::CannotOpenFile { + bucket_name: bucket_name.to_string(), + raw_error_message: err.to_string(), + }), } } - fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { + fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), ObjectStorageError> { // TODO(benjamin): switch to `digitalocean-api-rs` once we'll made the auo-generated lib - if let Err(message) = Spaces::is_bucket_name_valid(bucket_name) { - let message = format!( - "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", - object_key, - bucket_name, - message.unwrap_or_else(|| "unknown error".to_string()) - ); - return 
Err(self.engine_error(EngineErrorCause::Internal, message)); - } + let _ = Spaces::is_bucket_name_valid(bucket_name)?; let s3_client = self.get_s3_client(); match block_on(s3_client.put_object(PutObjectRequest { bucket: bucket_name.to_string(), key: object_key.to_string(), - body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + body: Some(StreamingBody::from(match std::fs::read(file_path) { Ok(x) => x, Err(e) => { - return Err(self.engine_error( - EngineErrorCause::Internal, - format!( - "error while uploading object {} to bucket {}. {}", - object_key, bucket_name, e - ), - )) + return Err(ObjectStorageError::CannotReadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }) } })), ..Default::default() })) { Ok(_) => Ok(()), - Err(e) => { - let message = format!( - "While trying to put object `{}` from bucket `{}`, error: {}", - object_key, bucket_name, e - ); - error!("{}", message); - Err(self.engine_error(EngineErrorCause::Internal, message)) - } + Err(e) => Err(ObjectStorageError::CannotUploadFile { + bucket_name: bucket_name.to_string(), + raw_error_message: e.to_string(), + }), } } } diff --git a/src/template.rs b/src/template.rs index 6f86abf5..670da04c 100644 --- a/src/template.rs +++ b/src/template.rs @@ -22,10 +22,12 @@ where let error_msg = match e.kind { tera::ErrorKind::TemplateNotFound(x) => format!("template not found: {}", x), tera::ErrorKind::Msg(x) => format!("tera error: {}", x), - tera::ErrorKind::CircularExtend { tpl, inheritance_chain } => format!( - "circular extend - template: {}, inheritance chain: {:?}", - tpl, inheritance_chain - ), + tera::ErrorKind::CircularExtend { tpl, inheritance_chain } => { + format!( + "circular extend - template: {}, inheritance chain: {:?}", + tpl, inheritance_chain + ) + } tera::ErrorKind::MissingParent { current, parent } => { format!("missing parent - current: {}, parent: {}", current, parent) } @@ -83,12 +85,11 @@ where .follow_links(true) .into_iter() 
.filter_map(|e| e.ok()) - .filter(|e| e.file_name().to_str().map(|s| s.contains(".j2.")).unwrap_or(false)) - .collect::>(); + .filter(|e| e.file_name().to_str().map(|s| s.contains(".j2.")).unwrap_or(false)); let mut results: Vec = vec![]; - for file in files.into_iter() { + for file in files { let path_str = file.path().to_str().unwrap(); let j2_path = path_str.replace(root_dir_str, ""); diff --git a/src/transaction.rs b/src/transaction.rs index 8045a5e8..785c3b6e 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -1,24 +1,26 @@ -use std::collections::HashMap; -use std::thread; +use crate::build_platform::BuildError; +use crate::cloud_provider::environment::Environment; +use std::cell::RefCell; +use std::rc::Rc; -use crate::build_platform::BuildResult; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::{Application, Service}; -use crate::container_registry::PushResult; -use crate::engine::EngineConfig; +use crate::cloud_provider::service::{Action, Service}; +use crate::container_registry::errors::ContainerRegistryError; +use crate::container_registry::to_engine_error; +use crate::engine::{EngineConfig, EngineConfigError}; use crate::errors::{EngineError, Tag}; -use crate::events::{EngineEvent, EventMessage}; -use crate::logger::{LogLevel, Logger}; -use crate::models::{ - Action, Environment, EnvironmentAction, EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, - ProgressScope, +use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, Transmitter}; +use crate::io_models::{ + EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; +use crate::logger::Logger; +use crate::models::application::IApplication; pub struct Transaction<'a> { engine: &'a EngineConfig, logger: Box, - steps: Vec>, - executed_steps: Vec>, + steps: Vec, + executed_steps: Vec, current_step: StepName, is_transaction_aborted: Box bool>, on_step_change: Box, @@ -30,9 
+32,11 @@ impl<'a> Transaction<'a> { logger: Box, is_transaction_aborted: Box bool>, on_step_change: Box, - ) -> Result { + ) -> Result { let _ = engine.is_valid()?; - let _ = engine.kubernetes().is_valid()?; + if let Err(e) = engine.kubernetes().is_valid() { + return Err(EngineConfigError::KubernetesNotValid(e)); + } let mut tx = Transaction::<'a> { engine, @@ -48,6 +52,19 @@ impl<'a> Transaction<'a> { Ok(tx) } + fn get_event_details(&self, stage: Stage, transmitter: Transmitter) -> EventDetails { + let context = self.engine.context(); + EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + transmitter, + ) + } + pub fn set_current_step(&mut self, step: StepName) { (self.on_step_change)(&step); self.current_step = step; @@ -68,9 +85,9 @@ impl<'a> Transaction<'a> { Ok(()) } - pub fn deploy_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { + pub fn deploy_environment(&mut self, environment: &Rc>) -> Result<(), EnvironmentError> { self.deploy_environment_with_options( - environment_action, + environment, DeploymentOption { force_build: false, force_push: false, @@ -78,143 +95,132 @@ impl<'a> Transaction<'a> { ) } + pub fn build_environment( + &mut self, + environment: &Rc>, + option: DeploymentOption, + ) -> Result<(), EnvironmentError> { + self.steps.push(Step::BuildEnvironment(environment.clone(), option)); + + Ok(()) + } + pub fn deploy_environment_with_options( &mut self, - environment_action: &'a EnvironmentAction, + environment: &Rc>, option: DeploymentOption, ) -> Result<(), EnvironmentError> { // add build step - self.steps.push(Step::BuildEnvironment(environment_action, option)); + self.build_environment(environment, option)?; // add deployment step - self.steps.push(Step::DeployEnvironment(environment_action)); + 
self.steps.push(Step::DeployEnvironment(environment.clone())); Ok(()) } - pub fn pause_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { - self.steps.push(Step::PauseEnvironment(environment_action)); + pub fn pause_environment(&mut self, environment: &Rc>) -> Result<(), EnvironmentError> { + self.steps.push(Step::PauseEnvironment(environment.clone())); Ok(()) } - pub fn delete_environment(&mut self, environment_action: &'a EnvironmentAction) -> Result<(), EnvironmentError> { - self.steps.push(Step::DeleteEnvironment(environment_action)); + pub fn delete_environment(&mut self, environment: &Rc>) -> Result<(), EnvironmentError> { + self.steps.push(Step::DeleteEnvironment(environment.clone())); Ok(()) } - fn load_build_app_cache(&self, app: &crate::models::Application) -> Result<(), EngineError> { - let container_registry = self.engine.container_registry(); - let mut image = app.to_image(); - - image.tag = String::from("latest"); - // pull image from container registry - // FIXME: if one day we use something else than LocalDocker to build image - // FIXME: we'll need to send the PullResult to the Build implementation - let _ = match container_registry.pull(&image) { - Ok(pull_result) => pull_result, - Err(err) => { - warn!( - "{}", - err.message.clone().unwrap_or(format!( - "something goes wrong while pulling image from {:?} container registry", - container_registry.kind() - )) - ); - return Err(EngineError::new_from_legacy_engine_error(err)); - } - }; - - Ok(()) - } - - fn build_applications( + fn build_and_push_applications( &self, - environment: &Environment, + applications: &mut [Box], option: &DeploymentOption, - ) -> Result>, EngineError> { + ) -> Result<(), EngineError> { // do the same for applications - let apps_to_build = environment - .applications - .iter() + let mut apps_to_build = applications + .iter_mut() // build only applications that are set with Action: Create - .filter(|app| app.action == 
Action::Create); - - let application_and_result_tuples = apps_to_build - .map(|app| { - let image = app.to_image(); - let build_result = if option.force_build || !self.engine.container_registry().does_image_exists(&image) - { - // If an error occurred we can skip it. It's not critical. - let _ = self.load_build_app_cache(app); - - // only if the build is forced OR if the image does not exist in the registry - self.engine - .build_platform() - .build(app.to_build(), option.force_build, &self.is_transaction_aborted) - } else { - // use the cache - Ok(BuildResult::new(app.to_build())) - }; - - (app, build_result) - }) + .filter(|app| *app.action() == Action::Create) .collect::>(); - let mut applications: Vec> = Vec::with_capacity(application_and_result_tuples.len()); - for (application, result) in application_and_result_tuples { - // catch build error, can't do it in Fn - let build_result = match result { - Err(err) => { - error!("build error for application {}: {:?}", application.id.as_str(), err); - return Err(err); - } - Ok(build_result) => build_result, + // If nothing to build, do nothing + if apps_to_build.is_empty() { + return Ok(()); + } + + // To convert ContainerError to EngineError + let cr_to_engine_error = |err: ContainerRegistryError| -> EngineError { + let event_details = self.get_event_details( + Stage::Environment(EnvironmentStep::Build), + Transmitter::ContainerRegistry( + self.engine.container_registry().id().to_string(), + self.engine.container_registry().name().to_string(), + ), + ); + to_engine_error(event_details, err) + }; + + let build_event_details = || -> EventDetails { + self.get_event_details( + Stage::Environment(EnvironmentStep::Build), + Transmitter::BuildPlatform( + self.engine.build_platform().id().to_string(), + self.engine.build_platform().name().to_string(), + ), + ) + }; + + // Do setup of registry and be sure we are login to the registry + let cr_registry = self.engine.container_registry(); + let _ = 
cr_registry.create_registry().map_err(cr_to_engine_error)?; + + for app in apps_to_build.iter_mut() { + // If image already exist in the registry, skip the build + if !option.force_build && cr_registry.does_image_exists(&app.get_build().image) { + continue; + } + + // Be sure that our repository exist before trying to pull/push images from it + let _ = self + .engine + .container_registry() + .create_repository(app.get_build().image.repository_name()) + .map_err(cr_to_engine_error)?; + + // Ok now everything is setup, we can try to build the app + let build_result = self + .engine + .build_platform() + .build(app.get_build_mut(), &self.is_transaction_aborted); + + // logging + let image_name = app.get_build().image.full_image_name_with_tag(); + let msg = match &build_result { + Ok(_) => format!("✅ Container {} is built", &image_name), + Err(BuildError::Aborted(_)) => format!("🚫 Container {} build has been canceled", &image_name), + Err(err) => format!("❌ Container {} failed to be build: {}", &image_name, err), }; - if let Some(app) = application.to_application( - self.engine.context(), - &build_result.build.image, - self.engine.cloud_provider(), - self.logger.clone(), - ) { - applications.push(app) - } + let progress_info = ProgressInfo::new( + ProgressScope::Application { + id: app.id().to_string(), + }, + match build_result.is_ok() { + true => ProgressLevel::Info, + false => ProgressLevel::Error, + }, + Some(msg.to_string()), + self.engine.context().execution_id(), + ); + ListenersHelper::new(self.engine.build_platform().listeners()).deployment_in_progress(progress_info); + + let event_details = build_event_details(); + self.logger + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg))); + + // Abort if it was an error + let _ = build_result.map_err(|err| crate::build_platform::to_engine_error(event_details, err))?; } - Ok(applications) - } - - fn push_applications( - &self, - applications: Vec>, - option: &DeploymentOption, - ) -> 
Result, PushResult)>, EngineError> { - let application_and_push_results: Vec<_> = applications - .into_iter() - .map(|mut app| { - match self.engine.container_registry().push(app.image(), option.force_push) { - Ok(push_result) => { - // I am not a big fan of doing that but it's the most effective way - app.set_image(push_result.image.clone()); - Ok((app, push_result)) - } - Err(err) => Err(err), - } - }) - .collect(); - - let mut results: Vec<(Box, PushResult)> = vec![]; - for result in application_and_push_results.into_iter() { - match result { - Ok(tuple) => results.push(tuple), - Err(err) => { - error!("error pushing docker image {:?}", err); - return Err(EngineError::new_from_legacy_engine_error(err)); - } - } - } - - Ok(results) + Ok(()) } pub fn rollback(&self) -> Result<(), RollbackError> { @@ -223,19 +229,19 @@ impl<'a> Transaction<'a> { Step::CreateKubernetes => { // revert kubernetes creation if let Err(err) = self.engine.kubernetes().on_create_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::DeleteKubernetes => { // revert kubernetes deletion if let Err(err) = self.engine.kubernetes().on_delete_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::PauseKubernetes => { // revert pause if let Err(err) = self.engine.kubernetes().on_pause_error() { - return Err(RollbackError::CommitError(err)); + return Err(RollbackError::CommitError(Box::new(err))); }; } Step::BuildEnvironment(_environment_action, _option) => { @@ -243,13 +249,13 @@ impl<'a> Transaction<'a> { } Step::DeployEnvironment(environment_action) => { // revert environment deployment - self.rollback_environment(*environment_action)?; + self.rollback_environment(&(environment_action.as_ref().borrow()))?; } Step::PauseEnvironment(environment_action) => { - self.rollback_environment(*environment_action)?; + 
self.rollback_environment(&(environment_action.as_ref().borrow()))?; } Step::DeleteEnvironment(environment_action) => { - self.rollback_environment(*environment_action)?; + self.rollback_environment(&(environment_action.as_ref().borrow()))?; } } } @@ -257,65 +263,25 @@ impl<'a> Transaction<'a> { Ok(()) } - /// This function is a wrapper to correctly revert all changes of an attempted deployment AND - /// if a failover environment is provided, then rollback. - fn rollback_environment(&self, environment_action: &EnvironmentAction) -> Result<(), RollbackError> { - let qe_environment = |environment: &Environment| { - let mut _applications = Vec::with_capacity(environment.applications.len()); - for application in environment.applications.iter() { - let build = application.to_build(); - - if let Some(x) = application.to_application( - self.engine.context(), - &build.image, - self.engine.cloud_provider(), - self.logger.clone(), - ) { - _applications.push(x) - } - } - - let qe_environment = environment.to_qe_environment( - self.engine.context(), - &_applications, - self.engine.cloud_provider(), - self.logger.clone(), - ); - - qe_environment + // Warning: This function function does not revert anything, it just there to grab info from kube and services if it fails + // FIXME: Cleanup this, qe_environment should not be rebuilt at this step + fn rollback_environment(&self, environment: &Environment) -> Result<(), RollbackError> { + let action = match environment.action { + Action::Create => self.engine.kubernetes().deploy_environment_error(environment), + Action::Pause => self.engine.kubernetes().pause_environment_error(environment), + Action::Delete => self.engine.kubernetes().delete_environment_error(environment), + Action::Nothing => Ok(()), }; - match environment_action { - EnvironmentAction::Environment(te) => { - // revert changes but there is no failover environment - let target_qe_environment = qe_environment(te); + let _ = match action { + Ok(_) => {} + Err(err) => 
return Err(RollbackError::CommitError(Box::new(err))), + }; - let action = match te.action { - Action::Create => self - .engine - .kubernetes() - .deploy_environment_error(&target_qe_environment), - Action::Pause => self.engine.kubernetes().pause_environment_error(&target_qe_environment), - Action::Delete => self - .engine - .kubernetes() - .delete_environment_error(&target_qe_environment), - Action::Nothing => Ok(()), - }; - - let _ = match action { - Ok(_) => {} - Err(err) => return Err(RollbackError::CommitError(err)), - }; - - Err(RollbackError::NoFailoverEnvironment) - } - } + Err(RollbackError::NoFailoverEnvironment) } pub fn commit(mut self) -> TransactionResult { - let mut applications_by_environment: HashMap<&Environment, Vec>> = HashMap::new(); - for step in self.steps.clone().into_iter() { // execution loop self.executed_steps.push(step.clone()); @@ -352,24 +318,20 @@ impl<'a> Transaction<'a> { } }; } - Step::BuildEnvironment(environment_action, option) => { - // build applications - let target_environment = match environment_action { - EnvironmentAction::Environment(te) => te, - }; + Step::BuildEnvironment(environment, option) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } - let applications_builds = match self.build_applications(target_environment, &option) { + // build applications + let applications = &mut (environment.as_ref().borrow_mut()).applications; + match self.build_and_push_applications(applications, &option) { Ok(apps) => apps, Err(engine_err) => { - self.logger.log( - LogLevel::Error, - EngineEvent::Error( - engine_err.clone(), - Some(EventMessage::new_from_safe( - "ROLLBACK STARTED! an error occurred".to_string(), - )), - ), - ); + self.logger.log(EngineEvent::Error( + engine_err.clone(), + Some(EventMessage::new_from_safe("ROLLBACK STARTED! 
an error occurred".to_string())), + )); return if engine_err.tag() == &Tag::TaskCancellationRequested { TransactionResult::Canceled @@ -378,34 +340,14 @@ impl<'a> Transaction<'a> { }; } }; - + } + Step::DeployEnvironment(environment_action) => { if (self.is_transaction_aborted)() { return TransactionResult::Canceled; } - let applications = match self.push_applications(applications_builds, &option) { - Ok(results) => { - let applications = results.into_iter().map(|(app, _)| app).collect::>(); - - applications - } - Err(engine_err) => { - warn!("ROLLBACK STARTED! an error occurred {:?}", engine_err); - return match self.rollback() { - Ok(_) => TransactionResult::Rollback(engine_err), - Err(err) => { - error!("ROLLBACK FAILED! fatal error: {:?}", err); - TransactionResult::UnrecoverableError(engine_err, err) - } - }; - } - }; - - applications_by_environment.insert(target_environment, applications); - } - Step::DeployEnvironment(environment_action) => { // deploy complete environment - match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().deploy_environment(qe_env) }) { TransactionResult::Ok => {} @@ -416,8 +358,12 @@ impl<'a> Transaction<'a> { }; } Step::PauseEnvironment(environment_action) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // pause complete environment - match self.commit_environment(environment_action, &applications_by_environment, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().pause_environment(qe_env) }) { TransactionResult::Ok => {} @@ -428,8 +374,12 @@ impl<'a> Transaction<'a> { }; } Step::DeleteEnvironment(environment_action) => { + if (self.is_transaction_aborted)() { + return TransactionResult::Canceled; + } + // delete complete environment - match self.commit_environment(environment_action, 
&applications_by_environment, |qe_env| { + match self.commit_environment(&(environment_action.as_ref().borrow()), |qe_env| { self.engine.kubernetes().delete_environment(qe_env) }) { TransactionResult::Ok => {} @@ -478,11 +428,6 @@ impl<'a> Transaction<'a> { let execution_id = self.engine.context().execution_id(); let lh = ListenersHelper::new(self.engine.kubernetes().listeners()); - // 100 ms sleep to avoid race condition on last service status update - // Otherwise, the last status sent to the CORE is (sometimes) not the right one. - // Even by storing data at the micro seconds precision - thread::sleep(std::time::Duration::from_millis(100)); - match result { Err(err) => { warn!("infrastructure ROLLBACK STARTED! an error occurred {:?}", err); @@ -508,51 +453,25 @@ impl<'a> Transaction<'a> { } } - fn commit_environment( - &self, - environment_action: &EnvironmentAction, - applications_by_environment: &HashMap<&Environment, Vec>>, - action_fn: F, - ) -> TransactionResult + fn commit_environment(&self, environment: &Environment, action_fn: F) -> TransactionResult where - F: Fn(&crate::cloud_provider::environment::Environment) -> Result<(), EngineError>, + F: Fn(&Environment) -> Result<(), EngineError>, { - let target_environment = match environment_action { - EnvironmentAction::Environment(te) => te, - }; - - let empty_vec = Vec::with_capacity(0); - let built_applications = match applications_by_environment.get(target_environment) { - Some(applications) => applications, - None => &empty_vec, - }; - - let qe_environment = target_environment.to_qe_environment( - self.engine.context(), - built_applications, - self.engine.cloud_provider(), - self.logger.clone(), - ); - let execution_id = self.engine.context().execution_id(); // send back the right progress status fn send_progress( kubernetes: &dyn Kubernetes, action: &Action, - service: &Box, + service: &T, execution_id: &str, is_error: bool, ) where T: Service + ?Sized, { let lh = 
ListenersHelper::new(kubernetes.listeners()); - let progress_info = ProgressInfo::new( - service.progress_scope(), - ProgressLevel::Info, - None::<&str>, - execution_id, - ); + let progress_info = + ProgressInfo::new(service.progress_scope(), ProgressLevel::Info, None::<&str>, execution_id); if !is_error { match action { @@ -572,12 +491,7 @@ impl<'a> Transaction<'a> { }; } - // 100 ms sleep to avoid race condition on last service status update - // Otherwise, the last status sent to the CORE is (sometimes) not the right one. - // Even by storing data at the micro seconds precision - thread::sleep(std::time::Duration::from_millis(100)); - - let _ = match action_fn(&qe_environment) { + let _ = match action_fn(environment) { Err(err) => { let rollback_result = match self.rollback() { Ok(_) => TransactionResult::Rollback(err), @@ -589,48 +503,24 @@ impl<'a> Transaction<'a> { // !!! don't change the order // terminal update - for service in &qe_environment.stateful_services { - send_progress( - self.engine.kubernetes(), - &target_environment.action, - service, - execution_id, - true, - ); + for service in environment.stateful_services() { + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, true); } - for service in &qe_environment.stateless_services { - send_progress( - self.engine.kubernetes(), - &target_environment.action, - service, - execution_id, - true, - ); + for service in environment.stateless_services() { + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, true); } return rollback_result; } _ => { // terminal update - for service in &qe_environment.stateful_services { - send_progress( - self.engine.kubernetes(), - &target_environment.action, - service, - execution_id, - false, - ); + for service in environment.stateful_services() { + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, false); } - for service in &qe_environment.stateless_services { - 
send_progress( - self.engine.kubernetes(), - &target_environment.action, - service, - execution_id, - false, - ); + for service in environment.stateless_services() { + send_progress(self.engine.kubernetes(), &environment.action, service, execution_id, false); } } }; @@ -672,18 +562,18 @@ impl StepName { } } -pub enum Step<'a> { +pub enum Step { // init and create all the necessary resources (Network, Kubernetes) CreateKubernetes, DeleteKubernetes, PauseKubernetes, - BuildEnvironment(&'a EnvironmentAction, DeploymentOption), - DeployEnvironment(&'a EnvironmentAction), - PauseEnvironment(&'a EnvironmentAction), - DeleteEnvironment(&'a EnvironmentAction), + BuildEnvironment(Rc>, DeploymentOption), + DeployEnvironment(Rc>), + PauseEnvironment(Rc>), + DeleteEnvironment(Rc>), } -impl<'a> Step<'a> { +impl Step { fn step_name(&self) -> StepName { match self { Step::CreateKubernetes => StepName::CreateKubernetes, @@ -697,23 +587,23 @@ impl<'a> Step<'a> { } } -impl<'a> Clone for Step<'a> { +impl Clone for Step { fn clone(&self) -> Self { match self { Step::CreateKubernetes => Step::CreateKubernetes, Step::DeleteKubernetes => Step::DeleteKubernetes, Step::PauseKubernetes => Step::PauseKubernetes, - Step::BuildEnvironment(e, option) => Step::BuildEnvironment(*e, option.clone()), - Step::DeployEnvironment(e) => Step::DeployEnvironment(*e), - Step::PauseEnvironment(e) => Step::PauseEnvironment(*e), - Step::DeleteEnvironment(e) => Step::DeleteEnvironment(*e), + Step::BuildEnvironment(e, option) => Step::BuildEnvironment(e.clone(), option.clone()), + Step::DeployEnvironment(e) => Step::DeployEnvironment(e.clone()), + Step::PauseEnvironment(e) => Step::PauseEnvironment(e.clone()), + Step::DeleteEnvironment(e) => Step::DeleteEnvironment(e.clone()), } } } #[derive(Debug)] pub enum RollbackError { - CommitError(EngineError), + CommitError(Box), NoFailoverEnvironment, Nothing, } diff --git a/src/unit_conversion.rs b/src/unit_conversion.rs index 33b64924..923f5bd8 100644 --- 
a/src/unit_conversion.rs +++ b/src/unit_conversion.rs @@ -19,7 +19,7 @@ pub fn cpu_string_to_float>(cpu: T) -> f32 { } // the result is in millis, so convert it to float - let cpu = cpu.replace("m", ""); + let cpu = cpu.replace('m', ""); match cpu.parse::() { Ok(v) if v >= 0.0 => v / 1000.0, _ => 0.0, diff --git a/src/utilities.rs b/src/utilities.rs index ef46e10a..d846d63c 100644 --- a/src/utilities.rs +++ b/src/utilities.rs @@ -1,6 +1,7 @@ use std::collections::hash_map::DefaultHasher; use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; +use std::path::Path; use reqwest::header; use reqwest::header::{HeaderMap, HeaderValue}; @@ -19,11 +20,11 @@ pub fn calculate_hash(t: &T) -> u64 { s.finish() } -pub fn get_image_tag( - root_path: &String, - dockerfile_path: &Option, +pub fn compute_image_tag + Hash, T: AsRef + Hash>( + root_path: P, + dockerfile_path: &Option, environment_variables: &BTreeMap, - commit_id: &String, + commit_id: &str, ) -> String { // Image tag == hash(root_path) + commit_id truncate to 127 char // https://github.com/distribution/distribution/blob/6affafd1f030087d88f88841bf66a8abe2bf4d24/reference/regexp.go#L41 @@ -38,8 +39,6 @@ pub fn get_image_tag( // only use when a Dockerfile is used to prevent build cache miss every single time // we redeploy an app with a env var changed with Buildpacks. 
dockerfile_path.hash(&mut hasher); - - // TODO check if the environment variables are used in the Dockerfile and only Hash the one that are used environment_variables.hash(&mut hasher); } @@ -51,60 +50,60 @@ pub fn get_image_tag( #[cfg(test)] mod tests_utilities { - use crate::utilities::get_image_tag; + use crate::utilities::compute_image_tag; use std::collections::BTreeMap; #[test] fn test_get_image_tag() { - let image_tag = get_image_tag( + let image_tag = compute_image_tag( &"/".to_string(), &Some("Dockerfile".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); - let image_tag_2 = get_image_tag( + let image_tag_2 = compute_image_tag( &"/".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_ne!(image_tag, image_tag_2); - let image_tag_3 = get_image_tag( + let image_tag_3 = compute_image_tag( &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_ne!(image_tag, image_tag_3); - let image_tag_3_2 = get_image_tag( + let image_tag_3_2 = compute_image_tag( &"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_eq!(image_tag_3, image_tag_3_2); - let image_tag_4 = get_image_tag( + let image_tag_4 = compute_image_tag( &"/".to_string(), - &None, + &None as &Option<&str>, &BTreeMap::new(), - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); let mut env_vars_5 = BTreeMap::new(); env_vars_5.insert("toto".to_string(), "key".to_string()); - let image_tag_5 = get_image_tag( + let image_tag_5 = compute_image_tag( &"/".to_string(), - 
&None, + &None as &Option<&str>, &env_vars_5, - &"63d8c437337416a7067d3f358197ac47d003fab9".to_string(), + "63d8c437337416a7067d3f358197ac47d003fab9", ); assert_eq!(image_tag_4, image_tag_5); diff --git a/test_utilities/Cargo.lock b/test_utilities/Cargo.lock index f794d3af..e737331b 100644 --- a/test_utilities/Cargo.lock +++ b/test_utilities/Cargo.lock @@ -963,9 +963,9 @@ checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" [[package]] name = "git2" -version = "0.13.25" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" +checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" dependencies = [ "bitflags", "libc", @@ -1464,9 +1464,9 @@ checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" [[package]] name = "libgit2-sys" -version = "0.12.26+1.3.0" +version = "0.13.2+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" dependencies = [ "cc", "libc", @@ -2147,6 +2147,7 @@ dependencies = [ "tracing-subscriber", "trust-dns-resolver", "url 2.2.2", + "urlencoding", "uuid 0.8.2", "walkdir", ] @@ -3319,8 +3320,10 @@ dependencies = [ "serde_derive", "serde_json", "time 0.2.24", + "tokio 1.10.0", "tracing", "tracing-subscriber", + "url 2.2.2", "uuid 0.8.2", ] @@ -3957,6 +3960,12 @@ dependencies = [ "url 1.7.2", ] +[[package]] +name = "urlencoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" + [[package]] name = "uuid" version = "0.7.4" diff --git a/test_utilities/Cargo.toml b/test_utilities/Cargo.toml index f777f2d0..d74d5875 100644 --- a/test_utilities/Cargo.toml +++ b/test_utilities/Cargo.toml @@ -28,6 
+28,8 @@ hashicorp_vault = "2.0.1" maplit = "1.0.2" uuid = { version = "0.8", features = ["v4"] } const_format = "0.2.22" +url = "2.2.2" +tokio = { version = "1.10.0", features = ["full"] } # Digital Ocean Deps digitalocean = "0.1.1" diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 9241663c..36fb944c 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -9,12 +9,11 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; use qovery_engine::cloud_provider::Kind::Aws; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; -use qovery_engine::container_registry::docker_hub::DockerHub; use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; +use qovery_engine::io_models::{Context, NoOpProgressListener}; use qovery_engine::logger::Logger; -use qovery_engine::models::Context; use std::str::FromStr; use std::sync::Arc; use tracing::error; @@ -33,7 +32,7 @@ pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; -pub fn container_registry_ecr(context: &Context) -> ECR { +pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR { let secrets = FuncTestsSecrets::new(); if secrets.AWS_ACCESS_KEY_ID.is_none() || secrets.AWS_SECRET_ACCESS_KEY.is_none() @@ -50,31 +49,40 @@ pub fn container_registry_ecr(context: &Context) -> ECR { secrets.AWS_ACCESS_KEY_ID.unwrap().as_str(), secrets.AWS_SECRET_ACCESS_KEY.unwrap().as_str(), secrets.AWS_DEFAULT_REGION.unwrap().as_str(), + Arc::new(Box::new(NoOpProgressListener {})), + logger, ) + .unwrap() } -pub fn container_registry_docker_hub(context: &Context) -> DockerHub { - DockerHub::new( - context.clone(), - "my-docker-hub-id-123", - "my-default-docker-hub", - "qoveryrd", - 
"3b9481fe-74e7-4d7b-bc08-e147c9fd4f24", +pub fn aws_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + AWS::docker_cr_engine( + &context, + logger, + AWS_TEST_REGION.to_string().as_str(), + AWS_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, ) } - impl Cluster for AWS { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use ECR - let container_registry = Box::new(container_registry_ecr(context)); + let container_registry = Box::new(container_registry_ecr(context, logger.clone())); // use LocalDocker let build_platform = Box::new(build_platform_local_docker(context, logger.clone())); // use AWS let cloud_provider: Arc> = Arc::new(AWS::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let k = get_environment_test_kubernetes( Aws, @@ -82,6 +90,9 @@ impl Cluster for AWS { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs index ab8a96f9..6cee1e77 100644 --- a/test_utilities/src/cloudflare.rs +++ b/test_utilities/src/cloudflare.rs @@ -2,12 +2,12 @@ use crate::common::ClusterDomain; use crate::utilities::FuncTestsSecrets; use qovery_engine::dns_provider::cloudflare::Cloudflare; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{Context, Domain}; +use qovery_engine::io_models::{Context, Domain}; -pub fn dns_provider_cloudflare(context: &Context, domain: ClusterDomain) -> Box { +pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box 
{ let secrets = FuncTestsSecrets::new(); let domain = Domain::new(match domain { - ClusterDomain::Custom(domain) => domain, + ClusterDomain::Custom(domain) => domain.to_string(), ClusterDomain::Default => secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set"), }); Box::new(Cloudflare::new( diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index f2e23f4f..af35a0f8 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -2,32 +2,31 @@ extern crate serde; extern crate serde_derive; use chrono::Utc; +use std::cell::RefCell; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{ - Action, Application, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, +use qovery_engine::io_models::{ + Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; -use crate::aws::AWS_KUBERNETES_VERSION; -use crate::cloudflare::dns_provider_cloudflare; -use crate::digitalocean::DO_KUBERNETES_VERSION; -use crate::scaleway::SCW_KUBERNETES_VERSION; +use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; +use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION}; +use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use crate::utilities::{ - db_disk_type, db_infos, db_instance_type, generate_cluster_id, generate_id, generate_password, get_pvc, get_svc, - get_svc_name, init, FuncTestsSecrets, + db_disk_type, db_infos, db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init, + FuncTestsSecrets, }; use base64; use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; -use 
qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; +use qovery_engine::cloud_provider::environment::Environment; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, Kind}; @@ -35,11 +34,14 @@ use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::CONTAINER; use qovery_engine::logger::Logger; -use qovery_engine::models::DatabaseMode::CONTAINER; +use qovery_engine::models::digital_ocean::DoRegion; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::transaction::{DeploymentOption, Transaction, TransactionResult}; use std::collections::BTreeMap; use std::path::Path; +use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use tracing::{span, Level}; @@ -55,53 +57,98 @@ pub enum ClusterDomain { } pub trait Cluster { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig; + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig; fn cloud_provider(context: &Context) -> Box; fn kubernetes_nodes() -> Vec; fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option) -> U; } pub trait Infrastructure { + fn build_environment( + &self, + environment: &EnvironmentRequest, + logger: Box, + engine_config: &EngineConfig, + ) -> (Environment, TransactionResult); + fn deploy_environment( &self, - 
provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult; + fn pause_environment( &self, - provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult; + fn delete_environment( &self, - provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult; } -impl Infrastructure for Environment { +impl Infrastructure for EnvironmentRequest { + fn build_environment( + &self, + environment: &EnvironmentRequest, + logger: Box, + engine_config: &EngineConfig, + ) -> (Environment, TransactionResult) { + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); + + let env = Rc::new(RefCell::new(env)); + let _ = tx.build_environment( + &env, + DeploymentOption { + force_build: true, + force_push: true, + }, + ); + + let ret = tx.commit(); + (Rc::try_unwrap(env).ok().unwrap().into_inner(), ret) + } + fn deploy_environment( &self, - provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(context, logger.clone()), - }; + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| 
{})).unwrap(); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = Rc::new(RefCell::new(env)); let _ = tx.deploy_environment_with_options( - &environment_action, + &env, DeploymentOption { force_build: true, force_push: true, @@ -113,38 +160,42 @@ impl Infrastructure for Environment { fn pause_environment( &self, - provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(context, logger.clone()), - }; - - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.pause_environment(&environment_action); + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); + let env = Rc::new(RefCell::new(env)); + let _ = tx.pause_environment(&env); tx.commit() } fn delete_environment( &self, - provider_kind: Kind, - context: &Context, - environment_action: &EnvironmentAction, + environment: &EnvironmentRequest, logger: Box, + engine_config: &EngineConfig, ) -> TransactionResult { - let engine: EngineConfig = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(context, logger.clone()), - Kind::Do => DO::docker_cr_engine(context, logger.clone()), - Kind::Scw => 
Scaleway::docker_cr_engine(context, logger.clone()), - }; - - let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let _ = tx.delete_environment(&environment_action); + let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); + let env = Rc::new(RefCell::new(env)); + let _ = tx.delete_environment(&env); tx.commit() } @@ -162,7 +213,7 @@ pub fn environment_3_apps_3_routers_3_databases( database_instance_type: &str, database_disk_type: &str, provider_kind: Kind, -) -> Environment { +) -> EnvironmentRequest { let app_name_1 = format!("{}-{}", "simple-app-1".to_string(), generate_id()); let app_name_2 = format!("{}-{}", "simple-app-2".to_string(), generate_id()); let app_name_3 = format!("{}-{}", "simple-app-3".to_string(), generate_id()); @@ -195,7 +246,7 @@ pub fn environment_3_apps_3_routers_3_databases( let database_username_2 = "superuser2".to_string(); let database_name_2 = "postgres2".to_string(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -457,19 +508,14 @@ pub fn environment_3_apps_3_routers_3_databases( } } -pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Environment { +pub fn working_minimal_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); let router_id = generate_id(); let router_name = "main".to_string(); - let application_domain = format!( - "{}.{}.{}", - application_id, - context.cluster_id().to_string(), - test_domain - ); - Environment { + let application_domain = 
format!("{}.{}.{}", application_id, context.cluster_id().to_string(), test_domain); + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -527,12 +573,12 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi } } -pub fn database_test_environment(context: &Context) -> Environment { +pub fn database_test_environment(context: &Context) -> EnvironmentRequest { let suffix = generate_id(); let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -570,7 +616,10 @@ pub fn database_test_environment(context: &Context) -> Environment { } } -pub fn environment_only_http_server_router_with_sticky_session(context: &Context, test_domain: &str) -> Environment { +pub fn environment_only_http_server_router_with_sticky_session( + context: &Context, + test_domain: &str, +) -> EnvironmentRequest { let mut env = environment_only_http_server_router(context, test_domain.clone()); for mut router in &mut env.routers { @@ -586,7 +635,7 @@ pub fn environnement_2_app_2_routers_1_psql( database_instance_type: &str, database_disk_type: &str, provider_kind: Kind, -) -> Environment { +) -> EnvironmentRequest { let fqdn = get_svc_name(DatabaseKind::Postgresql, provider_kind.clone()).to_string(); let database_port = 5432; @@ -598,7 +647,7 @@ pub fn environnement_2_app_2_routers_1_psql( let application_name1 = sanitize_name("postgresql", &format!("{}-{}", "postgresql-app1", &suffix)); let application_name2 = sanitize_name("postgresql", &format!("{}-{}", "postgresql-app2", &suffix)); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -753,7 +802,7 @@ pub fn environnement_2_app_2_routers_1_psql( } } -pub fn 
non_working_environment(context: &Context, test_domain: &str) -> Environment { +pub fn non_working_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let mut environment = working_minimal_environment(context, test_domain); environment.applications = environment @@ -772,9 +821,9 @@ pub fn non_working_environment(context: &Context, test_domain: &str) -> Environm // echo app environment is an environment that contains http-echo container (forked from hashicorp) // ECHO_TEXT var will be the content of the application root path -pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment { +pub fn echo_app_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -835,9 +884,9 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment } } -pub fn environment_only_http_server(context: &Context) -> Environment { +pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -884,9 +933,9 @@ pub fn environment_only_http_server(context: &Context) -> Environment { } } -pub fn environment_only_http_server_router(context: &Context, test_domain: &str) -> Environment { +pub fn environment_only_http_server_router(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); - Environment { + EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), owner_id: generate_id(), @@ -981,7 +1030,7 @@ pub fn routers_sessions_are_sticky(routers: Vec) -> bool { pub fn test_db( context: Context, logger: Box, - mut environment: Environment, + mut environment: EnvironmentRequest, secrets: FuncTestsSecrets, 
version: &str, test_name: &str, @@ -1075,24 +1124,52 @@ pub fn test_db( app.environment_vars = db_infos.app_env_vars.clone(); app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(provider_kind.clone(), &context, &ea, logger.clone()); + let (localisation, kubernetes_version) = match provider_kind { + Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), + Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), + Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + }; + + let engine_config = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); match database_mode.clone() { DatabaseMode::CONTAINER => { - match get_pvc( - context.clone(), - provider_kind.clone(), - environment.clone(), - secrets.clone(), - ) { + match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -1100,12 +1177,7 @@ pub fn test_db( Err(_) 
=> assert!(false), }; - match get_svc( - context.clone(), - provider_kind.clone(), - environment.clone(), - secrets.clone(), - ) { + match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(svc) => assert_eq!( svc.items .expect("No items in svc") @@ -1145,7 +1217,34 @@ pub fn test_db( } } - let ret = environment_delete.delete_environment(provider_kind.clone(), &context_for_delete, &ea_delete, logger); + let engine_config_for_delete = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); @@ -1157,30 +1256,32 @@ pub fn get_environment_test_kubernetes<'a>( cloud_provider: Arc>, dns_provider: Arc>, logger: Box, + localisation: &str, + kubernetes_version: &str, + vpc_network_mode: Option, ) -> Box { let secrets = FuncTestsSecrets::new(); let k: Box; match provider_kind { Kind::Aws => { - let region = secrets - .AWS_DEFAULT_REGION - .as_ref() - .expect("AWS_DEFAULT_REGION is not set") - .as_str(); - let aws_region = AwsRegion::from_str(region).expect("wrong AWS region name, please ensure it's correct"); + let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); + let mut options = AWS::kubernetes_cluster_options(secrets, None); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } k = Box::new( 
EKS::new( context.clone(), context.cluster_id(), uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()).as_str(), - AWS_KUBERNETES_VERSION, - aws_region.clone(), - aws_region.get_zones_to_string(), + kubernetes_version, + region.clone(), + region.get_zones_to_string(), cloud_provider, dns_provider, - AWS::kubernetes_cluster_options(secrets.clone(), None), + options, AWS::kubernetes_nodes(), logger, ) @@ -1188,6 +1289,7 @@ pub fn get_environment_test_kubernetes<'a>( ); } Kind::Do => { + let region = DoRegion::from_str(localisation).expect("DO region not supported"); k = Box::new( DOKS::new( context.clone(), @@ -1195,14 +1297,7 @@ pub fn get_environment_test_kubernetes<'a>( uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()), DO_KUBERNETES_VERSION.to_string(), - DoRegion::from_str( - secrets - .clone() - .DIGITAL_OCEAN_DEFAULT_REGION - .expect("DIGITAL_OCEAN_DEFAULT_REGION is not set") - .as_str(), - ) - .unwrap(), + region, cloud_provider, dns_provider, DO::kubernetes_nodes(), @@ -1213,6 +1308,7 @@ pub fn get_environment_test_kubernetes<'a>( ); } Kind::Scw => { + let zone = ScwZone::from_str(localisation).expect("SCW zone not supported"); k = Box::new( Kapsule::new( context.clone(), @@ -1220,14 +1316,7 @@ pub fn get_environment_test_kubernetes<'a>( uuid::Uuid::new_v4(), format!("qovery-{}", context.cluster_id()), SCW_KUBERNETES_VERSION.to_string(), - ScwZone::from_str( - secrets - .clone() - .SCALEWAY_DEFAULT_REGION - .expect("SCALEWAY_DEFAULT_REGION is not set") - .as_str(), - ) - .unwrap(), + zone, cloud_provider, dns_provider, Scaleway::kubernetes_nodes(), @@ -1262,7 +1351,9 @@ pub fn get_cluster_test_kubernetes<'a>( Kind::Aws => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); - options.vpc_qovery_network_mode = vpc_network_mode.unwrap(); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = 
vpc_network_mode.expect("No vpc network mode"); + } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); k = Box::new( EKS::new( @@ -1330,27 +1421,44 @@ pub fn cluster_test( logger: Box, localisation: &str, aws_zones: Option>, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, - cluster_domain: ClusterDomain, + cluster_domain: &ClusterDomain, vpc_network_mode: Option, - environment_to_deploy: Option<&EnvironmentAction>, + environment_to_deploy: Option<&EnvironmentRequest>, ) -> String { init(); let span = span!(Level::INFO, "test", name = test_name); let _enter = span.enter(); - - let cluster_id = generate_cluster_id(localisation.clone()); - let cluster_name = generate_cluster_id(localisation.clone()); let boot_version = format!("{}.{}", major_boot_version, minor_boot_version.clone()); let engine = match provider_kind { - Kind::Aws => AWS::docker_cr_engine(&context, logger.clone()), - Kind::Do => DO::docker_cr_engine(&context, logger.clone()), - Kind::Scw => Scaleway::docker_cr_engine(&context, logger.clone()), + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation, + boot_version, + cluster_domain, + vpc_network_mode.clone(), + ), }; let mut deploy_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut delete_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); @@ -1362,28 +1470,6 @@ pub fn cluster_test( } }; - let dns_provider = Arc::new(dns_provider_cloudflare(&context, cluster_domain)); - let cp: Arc> = match provider_kind { - Kind::Aws => 
Arc::new(AWS::cloud_provider(&context)), - Kind::Do => Arc::new(DO::cloud_provider(&context)), - Kind::Scw => Arc::new(Scaleway::cloud_provider(&context)), - }; - - let kubernetes = get_cluster_test_kubernetes( - provider_kind.clone(), - secrets.clone(), - &context, - cluster_id.clone(), - cluster_name.clone(), - boot_version.clone(), - localisation.clone(), - aws_zones.clone(), - cp.clone(), - dns_provider.clone(), - vpc_network_mode.clone(), - logger.clone(), - ); - // Deploy if let Err(err) = deploy_tx.create_kubernetes() { panic!("{:?}", err) @@ -1396,7 +1482,16 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - if let Err(err) = deploy_env_tx.deploy_environment(env) { + let env = env + .to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); + let env = Rc::new(RefCell::new(env)); + if let Err(err) = deploy_env_tx.deploy_environment(&env) { panic!("{:?}", err) } @@ -1404,10 +1499,11 @@ pub fn cluster_test( } if let Err(err) = metrics_server_test( - kubernetes + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - kubernetes.cloud_provider().credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } @@ -1433,30 +1529,43 @@ pub fn cluster_test( assert!(matches!(resume_tx.commit(), TransactionResult::Ok)); if let Err(err) = metrics_server_test( - kubernetes + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - kubernetes.cloud_provider().credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } } ClusterTestType::WithUpgrade => { let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version.clone() + 1); - let upgraded_kubernetes = 
get_cluster_test_kubernetes( - provider_kind.clone(), - secrets.clone(), - &context, - cluster_id.clone(), - cluster_name.clone(), - upgrade_to_version.clone(), - localisation.clone(), - aws_zones, - cp, - dns_provider, - vpc_network_mode.clone(), - logger.clone(), - ); + let engine = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation, + upgrade_to_version, + cluster_domain, + vpc_network_mode.clone(), + ), + }; let mut upgrade_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut delete_tx = @@ -1469,14 +1578,11 @@ pub fn cluster_test( assert!(matches!(upgrade_tx.commit(), TransactionResult::Ok)); if let Err(err) = metrics_server_test( - upgraded_kubernetes - .as_ref() + engine + .kubernetes() .get_kubeconfig_file_path() .expect("Unable to get config file path"), - upgraded_kubernetes - .as_ref() - .cloud_provider() - .credentials_environment_variables(), + engine.kubernetes().cloud_provider().credentials_environment_variables(), ) { panic!("{:?}", err) } @@ -1497,7 +1603,16 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - if let Err(err) = destroy_env_tx.delete_environment(env) { + let env = env + .to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); + let env = Rc::new(RefCell::new(env)); + if let Err(err) = destroy_env_tx.delete_environment(&env) { panic!("{:?}", err) } assert!(matches!(destroy_env_tx.commit(), TransactionResult::Ok)); @@ -1529,9 +1644,7 @@ where .expect("No hpa 
condition.") .contains("ValidMetricFound") { - return Err(CommandError::new_from_safe_message( - "Metrics server doesn't work".to_string(), - )); + return Err(CommandError::new_from_safe_message("Metrics server doesn't work".to_string())); } } Ok(()) diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 38c7aadc..36a5db93 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -1,5 +1,5 @@ use const_format::formatcp; -use qovery_engine::build_platform::Image; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; use qovery_engine::cloud_provider::digitalocean::DO; @@ -7,18 +7,18 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::error::EngineError; -use qovery_engine::models::{Context, Environment}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; +use qovery_engine::models::digital_ocean::DoRegion; pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const DO_KUBERNETES_MINOR_VERSION: u8 = 20; @@ -36,13 +36,33 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { DOCR::new( 
context.clone(), DOCR_ID, - "default-docr-registry-qovery-do-test", + DOCR_ID, secrets.DIGITAL_OCEAN_TOKEN.unwrap().as_str(), + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap() +} + +pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + DO::docker_cr_engine( + &context, + logger, + DO_TEST_REGION.to_string().as_str(), + DO_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, ) } impl Cluster for DO { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use DigitalOcean Container Registry let container_registry = Box::new(container_registry_digital_ocean(context)); // use LocalDocker @@ -50,8 +70,7 @@ impl Cluster for DO { // use Digital Ocean let cloud_provider: Arc> = Arc::new(Self::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let k = get_environment_test_kubernetes( Do, @@ -59,6 +78,9 @@ impl Cluster for DO { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( @@ -141,11 +163,11 @@ impl Cluster for DO { pub fn clean_environments( context: &Context, - environments: Vec, + _environments: Vec, secrets: FuncTestsSecrets, _region: DoRegion, ) -> Result<(), EngineError> { - let do_cr = DOCR::new( + let _do_cr = DOCR::new( context.clone(), "test", "test", @@ -153,16 +175,26 @@ pub fn clean_environments( .DIGITAL_OCEAN_TOKEN .as_ref() .expect("DIGITAL_OCEAN_TOKEN is not set in secrets"), + Arc::new(Box::new(NoOpProgressListener {})), ); + // FIXME: re-enable it, or let pleco do its job ? 
+ /* // delete images created in registry + let registry_url = do_cr.login()?; for env in environments.iter() { - for image in env.applications.iter().map(|a| a.to_image()).collect::>() { - if let Err(e) = do_cr.delete_image(&image) { - return Err(e); - } + for image in env + .applications + .iter() + .map(|a| a.to_image(®istry_url)) + .collect::>() + { + //if let Err(e) = do_cr.delete_registry(&image.name) { + // return Err(e); + //} } } + */ Ok(()) } diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 6dd3a0c7..d3c570bf 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -1,13 +1,11 @@ use const_format::formatcp; -use qovery_engine::build_platform::Image; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; +use qovery_engine::build_platform::Build; use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::error::EngineError; -use qovery_engine::models::{Context, Environment}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; @@ -15,11 +13,15 @@ use crate::cloudflare::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Scw; +use qovery_engine::container_registry::errors::ContainerRegistryError; 
+use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::logger::Logger; +use qovery_engine::models::scaleway::ScwZone; use tracing::error; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; @@ -57,11 +59,31 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { scw_secret_key.as_str(), scw_default_project_id.as_str(), SCW_TEST_ZONE, + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap() +} + +pub fn scw_default_engine_config(context: &Context, logger: Box) -> EngineConfig { + Scaleway::docker_cr_engine( + &context, + logger, + SCW_TEST_ZONE.to_string().as_str(), + SCW_KUBERNETES_VERSION.to_string(), + &ClusterDomain::Default, + None, ) } impl Cluster for Scaleway { - fn docker_cr_engine(context: &Context, logger: Box) -> EngineConfig { + fn docker_cr_engine( + context: &Context, + logger: Box, + localisation: &str, + kubernetes_version: String, + cluster_domain: &ClusterDomain, + vpc_network_mode: Option, + ) -> EngineConfig { // use Scaleway CR let container_registry = Box::new(container_registry_scw(context)); @@ -70,8 +92,7 @@ impl Cluster for Scaleway { // use Scaleway let cloud_provider: Arc> = Arc::new(Self::cloud_provider(context)); - let dns_provider: Arc> = - Arc::new(dns_provider_cloudflare(context, ClusterDomain::Default)); + let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let cluster = get_environment_test_kubernetes( Scw, @@ -79,6 +100,9 @@ impl Cluster for Scaleway { cloud_provider.clone(), dns_provider.clone(), logger.clone(), + localisation, + kubernetes_version.as_str(), + vpc_network_mode, ); EngineConfig::new( @@ -199,10 +223,10 @@ pub fn scw_object_storage(context: Context, region: ScwZone) -> ScalewayOS { pub fn clean_environments( context: &Context, - environments: Vec, + environments: Vec, secrets: FuncTestsSecrets, zone: ScwZone, -) -> Result<(), EngineError> { +) -> Result<(), ContainerRegistryError> { let 
secret_token = secrets.SCALEWAY_SECRET_KEY.unwrap(); let project_id = secrets.SCALEWAY_DEFAULT_PROJECT_ID.unwrap(); @@ -213,14 +237,19 @@ pub fn clean_environments( secret_token.as_str(), project_id.as_str(), zone, - ); + Arc::new(Box::new(NoOpProgressListener {})), + )?; // delete images created in registry + let registry_url = container_registry_client.registry_info(); for env in environments.iter() { - for image in env.applications.iter().map(|a| a.to_image()).collect::>() { - if let Err(e) = container_registry_client.delete_image(&image) { - return Err(e); - } + for build in env + .applications + .iter() + .map(|a| a.to_build(®istry_url)) + .collect::>() + { + let _ = container_registry_client.delete_image(&build.image); } } diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index c714efe0..3b553377 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -9,17 +9,18 @@ use curl::easy::Easy; use dirs::home_dir; use gethostname; use std::collections::BTreeMap; -use std::io::{Error, ErrorKind, Read, Write}; +use std::io::{Error, ErrorKind, Write}; use std::path::Path; -use std::str::FromStr; use passwords::PasswordGenerator; +use qovery_engine::cloud_provider::digitalocean::kubernetes::doks_api::get_do_kubeconfig_by_cluster_name; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use retry::delay::Fibonacci; use retry::OperationResult; use std::env; use std::fs; +use std::str::FromStr; use tracing::{info, warn}; use crate::scaleway::{ @@ -28,14 +29,13 @@ use crate::scaleway::{ }; use hashicorp_vault; use qovery_engine::build_platform::local_docker::LocalDocker; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd; use qovery_engine::constants::{ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID, DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, 
SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY, }; -use qovery_engine::models::{Context, Database, DatabaseKind, DatabaseMode, Environment, Features, Metadata}; +use qovery_engine::io_models::{Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, Features, Metadata}; use retry::Error::Operation; use serde::{Deserialize, Serialize}; @@ -44,17 +44,17 @@ use crate::digitalocean::{ DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, }; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cmd::command::QoveryCommand; +use qovery_engine::cmd::docker::Docker; use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc}; use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC}; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::MANAGED; use qovery_engine::logger::{Logger, StdIoLogger}; -use qovery_engine::models::DatabaseMode::MANAGED; -use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces}; -use qovery_engine::object_storage::ObjectStorage; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::runtime::block_on; use time::Instant; +use url::Url; pub fn context(organization_id: &str, cluster_id: &str) -> Context { let organization_id = organization_id.to_string(); @@ -62,6 +62,8 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { let execution_id = execution_id(); let home_dir = std::env::var("WORKSPACE_ROOT_DIR").unwrap_or(home_dir().unwrap().to_str().unwrap().to_string()); let lib_root_dir = std::env::var("LIB_ROOT_DIR").expect("LIB_ROOT_DIR is mandatory"); + let docker_host = std::env::var("DOCKER_HOST").map(|x| Url::parse(&x).unwrap()).ok(); + let docker = Docker::new(docker_host.clone()).expect("Can't init docker"); let metadata = Metadata { dry_run_deploy: Option::from({ @@ -80,7 +82,6 @@ pub fn 
context(organization_id: &str, cluster_id: &str) -> Context { None => Some(7200), } }, - docker_build_options: Some("--network host".to_string()), forced_upgrade: Option::from({ match env::var_os("forced_upgrade") { Some(_) => true, @@ -90,7 +91,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { disable_pleco: Some(true), }; - let enabled_features = vec![Features::LogsHistory, Features::MetricsHistory]; + let enabled_features = vec![Features::LogsHistory]; Context::new( organization_id, @@ -99,9 +100,10 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { home_dir, lib_root_dir, true, - None, + docker_host, enabled_features, Option::from(metadata), + docker, ) } @@ -363,7 +365,7 @@ impl FuncTestsSecrets { } pub fn build_platform_local_docker(context: &Context, logger: Box) -> LocalDocker { - LocalDocker::new(context.clone(), "oxqlm3r99vwcmvuj", "qovery-local-docker", logger) + LocalDocker::new(context.clone(), "oxqlm3r99vwcmvuj", "qovery-local-docker", logger).unwrap() } pub fn init() -> Instant { @@ -453,14 +455,11 @@ pub fn generate_password(provider_kind: Kind, db_mode: DatabaseMode) -> String { password } -pub fn check_all_connections(env: &Environment) -> Vec { +pub fn check_all_connections(env: &EnvironmentRequest) -> Vec { let mut checking: Vec = Vec::with_capacity(env.routers.len()); for router_to_test in &env.routers { - let path_to_test = format!( - "https://{}{}", - &router_to_test.default_domain, &router_to_test.routes[0].path - ); + let path_to_test = format!("https://{}{}", &router_to_test.default_domain, &router_to_test.routes[0].path); checking.push(curl_path(path_to_test.as_str())); } @@ -534,64 +533,24 @@ where ) } Kind::Do => { - let region_raw = secrets - .DIGITAL_OCEAN_DEFAULT_REGION - .as_ref() - .expect(&"DIGITAL_OCEAN_DEFAULT_REGION should be set".to_string()) - .to_string(); + let cluster_name = format!("qovery-{}", context.cluster_id()); + let kubeconfig = match get_do_kubeconfig_by_cluster_name( + 
secrets.clone().DIGITAL_OCEAN_TOKEN.unwrap().as_str(), + cluster_name.clone().as_str(), + ) { + Ok(kubeconfig) => kubeconfig, + Err(e) => return OperationResult::Retry(CommandError::new(e.message(), Some(e.message()))), + }; - match DoRegion::from_str(region_raw.as_str()) { - Ok(region) => { - let spaces = Spaces::new( - context.clone(), - "fake".to_string(), - "fake".to_string(), - secrets - .DIGITAL_OCEAN_SPACES_ACCESS_ID - .as_ref() - .expect(&"DIGITAL_OCEAN_SPACES_ACCESS_ID should be set".to_string()) - .to_string(), - secrets - .DIGITAL_OCEAN_SPACES_SECRET_ID - .as_ref() - .expect(&"DIGITAL_OCEAN_SPACES_SECRET_ID should be set".to_string()) - .to_string(), - region, - BucketDeleteStrategy::HardDelete, - ); - - match spaces.get( - kubernetes_config_bucket_name.as_str(), - kubernetes_config_object_key.as_str(), - false, - ) { - Ok((_, mut file)) => { - let mut content = String::new(); - match file.read_to_string(&mut content) { - Ok(_) => Ok(content), - Err(e) => { - let message_safe = "Error while trying to read file"; - Err(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e), - Some(message_safe.to_string()), - )) - } - } - } - Err(e) => { - let message_safe = "Error while trying to get kubeconfig from spaces"; - Err(CommandError::new( - format!( - "{}, error: {}", - message_safe.to_string(), - e.message.unwrap_or("no error message".to_string()) - ), - Some(message_safe.to_string()), - )) - } - } + match kubeconfig { + None => Err(CommandError::new( + "No kubeconfig found".to_string(), + Some("No kubeconfig found".to_string()), + )), + Some(file_content) => { + let _ = "test"; + Ok(file_content) } - Err(e) => Err(e), } } Kind::Scw => { @@ -677,9 +636,7 @@ where } } - Err(CommandError::new_from_safe_message( - "Test cluster not found".to_string(), - )) + Err(CommandError::new_from_safe_message("Test cluster not found".to_string())) } }; @@ -729,10 +686,7 @@ fn get_cloud_provider_credentials(provider_kind: Kind, secrets: &FuncTestsSecret 
match provider_kind { Kind::Aws => vec![ (AWS_ACCESS_KEY_ID, secrets.AWS_ACCESS_KEY_ID.as_ref().unwrap().as_str()), - ( - AWS_SECRET_ACCESS_KEY, - secrets.AWS_SECRET_ACCESS_KEY.as_ref().unwrap().as_str(), - ), + (AWS_SECRET_ACCESS_KEY, secrets.AWS_SECRET_ACCESS_KEY.as_ref().unwrap().as_str()), ], Kind::Do => vec![ ( @@ -758,14 +712,8 @@ fn get_cloud_provider_credentials(provider_kind: Kind, secrets: &FuncTestsSecret ), ], Kind::Scw => vec![ - ( - SCALEWAY_ACCESS_KEY, - secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str(), - ), - ( - SCALEWAY_SECRET_KEY, - secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str(), - ), + (SCALEWAY_ACCESS_KEY, secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str()), + (SCALEWAY_SECRET_KEY, secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str()), ( SCALEWAY_DEFAULT_PROJECT_ID, secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str(), @@ -806,15 +754,11 @@ fn aws_s3_get_object( pub fn is_pod_restarted_env( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, pod_to_check: &str, secrets: FuncTestsSecrets, ) -> (bool, String) { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -841,15 +785,11 @@ pub fn is_pod_restarted_env( pub fn get_pods( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, pod_to_check: &str, secrets: FuncTestsSecrets, ) -> Result, CommandError> { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = 
kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -914,14 +854,10 @@ pub fn generate_cluster_id(region: &str) -> String { pub fn get_pvc( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -943,14 +879,10 @@ pub fn get_pvc( pub fn get_svc( context: Context, provider_kind: Kind, - environment_check: Environment, + environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!( - "{}-{}", - &environment_check.project_id.clone(), - &environment_check.id.clone(), - ); + let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index a3ffbfba..e201076c 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -2,16 +2,15 @@ extern crate test_utilities; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; use self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TYPE}; use self::test_utilities::utilities::{ context, engine_run_test, generate_id, 
get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; use test_utilities::common::{test_db, Infrastructure}; @@ -47,7 +46,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -61,16 +62,16 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -99,7 +100,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let 
engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -114,31 +117,25 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.pause_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db let app_name = format!("postgresql{}-0", environment.databases[0].name); - let ret = get_pods( - context.clone(), - Kind::Aws, - environment.clone(), - app_name.clone().as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Aws, environment, app_name.as_str(), secrets); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -168,7 +165,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = 
context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -192,10 +191,10 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_for_deletion = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -205,10 +204,10 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_for_deletion, logger); + let ret = environment_delete.delete_environment(&ea_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -238,8 +237,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_redeploy = context.clone_not_same_execution_id(); + let engine_config_for_redeploy = aws_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -302,45 +304,38 @@ fn 
postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); let environment_check = environment.clone(); - let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let ea_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = - environment_to_redeploy.deploy_environment(Kind::Aws, &context_for_redeploy, &ea_redeploy, logger.clone()); + let ret = environment_to_redeploy.deploy_environment(&ea_redeploy, logger.clone(), &engine_config_for_redeploy); assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY let database_name = format!("postgresql{}-0", &environment_check.databases[0].name); - match is_pod_restarted_env( - context.clone(), - Kind::Aws, - environment_check, - database_name.as_str(), - secrets.clone(), - ) { + match is_pod_restarted_env(context, Kind::Aws, environment_check, database_name.as_str(), secrets) { (true, _) => assert!(true), (false, _) => assert!(false), } - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - return test_name.to_string(); + 
test_name.to_string() }) } @@ -394,6 +389,7 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -408,6 +404,7 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } @@ -422,6 +419,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -546,6 +544,7 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } @@ -560,6 +559,7 @@ fn private_mongodb_v4_0_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } @@ -574,6 +574,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -603,6 +604,7 @@ fn private_mongodb_v3_6_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_prod_environment() { test_mongodb_configuration("3.6", 
function_name!(), MANAGED, true); } @@ -617,6 +619,7 @@ fn private_mongodb_v4_0_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_prod_environment() { test_mongodb_configuration("4.0", function_name!(), MANAGED, true); } @@ -670,6 +673,7 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } @@ -766,6 +770,7 @@ fn private_redis_v5_deploy_a_working_dev_environment() { #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } @@ -795,6 +800,7 @@ fn private_redis_v5_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_prod_environment() { test_redis_configuration("5", function_name!(), MANAGED, true); } @@ -809,6 +815,7 @@ fn private_redis_v6_deploy_a_working_prod_environment() { #[cfg(feature = "test-aws-managed-services")] #[named] #[test] +#[ignore] fn public_redis_v6_deploy_a_working_prod_environment() { test_redis_configuration("6", function_name!(), MANAGED, true); } diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index 1de17677..d45cc26f 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -5,23 +5,69 @@ use self::test_utilities::utilities::{ engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; -use 
qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; -use test_utilities::aws::container_registry_ecr; -use test_utilities::utilities::{build_platform_local_docker, context, init, kubernetes_config_path}; +use std::thread; +use std::time::Duration; +use test_utilities::aws::aws_default_engine_config; +use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; -// TODO: -// - Tests that applications are always restarted when receiving a CREATE action -// see: https://github.com/Qovery/engine/pull/269 +#[cfg(feature = "test-aws-minimal")] +#[named] +#[test] +fn aws_test_build_phase() { + // This test tries to run up to the build phase of the engine + // basically building and pushing each applications + let test_name = function_name!(); + engine_run_test(|| { + init(); + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); -#[cfg(feature = "test-aws-self-hosted")] + let logger = logger(); + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .AWS_TEST_ORGANIZATION_ID + .as_ref() + .expect("AWS_TEST_ORGANIZATION_ID is not set") + .as_str(), + secrets + .AWS_TEST_CLUSTER_ID + .as_ref() + .expect("AWS_TEST_CLUSTER_ID is not set") + .as_str(), + ); + let engine_config = aws_default_engine_config(&context, logger.clone()); + let mut environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + environment.routers = vec![]; + let ea = environment.clone(); + + let (env, ret) = environment.build_environment(&ea, logger.clone(), 
&engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check the the image exist in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} + +#[cfg(feature = "test-aws-minimal")] #[named] #[test] fn deploy_a_working_environment_with_no_router_on_aws_eks() { @@ -45,7 +91,10 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); + let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -59,107 +108,16 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() { environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_for_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); - }) -} - -#[cfg(feature = "test-aws-self-hosted")] -#[named] -#[test] -fn test_build_cache() { - let test_name = function_name!(); - engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = 
test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .AWS_TEST_ORGANIZATION_ID - .as_ref() - .expect("AWS_TEST_ORGANIZATION_ID is not set") - .as_str(), - secrets - .AWS_TEST_CLUSTER_ID - .as_ref() - .expect("AWS_TEST_CLUSTER_ID is not set") - .as_str(), - ); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let ecr = container_registry_ecr(&context); - let local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match ecr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match ecr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match ecr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match 
ecr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); + test_name.to_string() }) } @@ -187,7 +145,10 @@ fn deploy_a_working_environment_and_pause_it_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); + let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -197,10 +158,10 @@ fn deploy_a_working_environment_and_pause_it_eks() { .as_str(), ); - let ea = EnvironmentAction::Environment(environment.clone()); - let selector = format!("appId={}", environment.clone().applications[0].id); + let ea = environment.clone(); + let selector = format!("appId={}", environment.applications[0].id); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -213,7 +174,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment(Kind::Aws, &context_for_delete, &ea, logger.clone()); + let ret = environment.pause_environment(&ea, logger.clone(), &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -264,16 +225,11 @@ fn deploy_a_working_environment_and_pause_it_eks() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let ret = 
environment.deploy_environment(Kind::Aws, &ctx_resume, &ea, logger.clone()); + let engine_config_resume = aws_default_engine_config(&ctx_resume, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Aws, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Aws, environment.clone(), selector.as_str(), secrets.clone()); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); @@ -317,10 +273,10 @@ fn deploy_a_working_environment_and_pause_it_eks() { assert!(filtered_pdb); // Cleanup - let ret = environment.delete_environment(Kind::Aws, &context_for_delete, &ea, logger); + let ret = environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -348,7 +304,9 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -362,19 +320,19 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), 
&engine_config); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - return test_name.to_string(); + test_name.to_string() }) } @@ -403,7 +361,9 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -429,21 +389,21 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -472,7 +432,9 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { 
.expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -498,21 +460,21 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -541,7 +503,9 @@ fn deploy_a_working_environment_with_domain() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -553,16 +517,16 @@ fn deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = 
Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -591,7 +555,9 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -617,18 +583,18 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { + match get_pvc(context, 
Kind::Aws, environment, secrets) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -636,10 +602,10 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { Err(_) => assert!(false), }; - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -669,8 +635,11 @@ fn redeploy_same_app_with_ebs() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = aws_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = aws_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -696,21 +665,21 @@ fn redeploy_same_app_with_ebs() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); let environment_check2 = environment.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea2 = EnvironmentAction::Environment(environment_redeploy.clone()); - let ea_delete = EnvironmentAction::Environment(environment_delete.clone()); + let ea = environment.clone(); + let ea2 = environment_redeploy.clone(); + let ea_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); 
assert!(matches!(ret, TransactionResult::Ok)); - match get_pvc(context.clone(), Kind::Aws, environment.clone(), secrets.clone()) { + match get_pvc(context.clone(), Kind::Aws, environment, secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) @@ -723,26 +692,20 @@ fn redeploy_same_app_with_ebs() { context.clone(), Kind::Aws, environment_check1, - app_name.clone().as_str(), - secrets.clone(), - ); - - let ret = environment_redeploy.deploy_environment(Kind::Aws, &context_bis, &ea2, logger.clone()); - assert!(matches!(ret, TransactionResult::Ok)); - - let (_, number2) = is_pod_restarted_env( - context.clone(), - Kind::Aws, - environment_check2, app_name.as_str(), secrets.clone(), ); - //nothing change in the app, so, it shouldn't be restarted - assert!(number.eq(&number2)); - let ret = environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_delete, logger); + + let ret = environment_redeploy.deploy_environment(&ea2, logger.clone(), &engine_config_bis); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + let (_, number2) = is_pod_restarted_env(context, Kind::Aws, environment_check2, app_name.as_str(), secrets); + //nothing change in the app, so, it shouldn't be restarted + assert!(number.eq(&number2)); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_deletion); + assert!(matches!(ret, TransactionResult::Ok)); + + test_name.to_string() }) } @@ -771,8 +734,11 @@ fn deploy_a_not_working_environment_and_after_working_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = aws_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + 
let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -794,30 +760,29 @@ fn deploy_a_not_working_environment_and_after_working_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; // environment actions - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let ea_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let ea = environment.clone(); + let ea_not_working = environment_for_not_working.clone(); + let ea_delete = environment_for_delete.clone(); let ret = environment_for_not_working.deploy_environment( - Kind::Aws, - &context_for_not_working, &ea_not_working, logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = environment_for_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -850,6 +815,7 @@ fn deploy_ok_fail_fail_ok_environment() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -860,6 +826,7 @@ fn deploy_ok_fail_fail_ok_environment() { // not working 
1 let context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = aws_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -871,58 +838,52 @@ fn deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = aws_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let ea_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let ea_delete = EnvironmentAction::Environment(delete_env.clone()); + let ea = environment.clone(); + let ea_not_working_1 = not_working_env_1.clone(); + let ea_not_working_2 = not_working_env_2.clone(); + let ea_delete = delete_env.clone(); // OK - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // FAIL and rollback - let ret = not_working_env_1.deploy_environment( - Kind::Aws, - &context_for_not_working_1, - &ea_not_working_1, - logger.clone(), - ); + let ret = + not_working_env_1.deploy_environment(&ea_not_working_1, logger.clone(), &engine_config_for_not_working_1); assert!(matches!( ret, TransactionResult::Rollback(_) | 
TransactionResult::UnrecoverableError(_, _) )); // FAIL and Rollback again - let ret = not_working_env_2.deploy_environment( - Kind::Aws, - &context_for_not_working_2, - &ea_not_working_2, - logger.clone(), - ); + let ret = + not_working_env_2.deploy_environment(&ea_not_working_2, logger.clone(), &engine_config_for_not_working_2); assert!(matches!( ret, TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _) )); // Should be working - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -951,6 +912,7 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { .expect("AWS_TEST_CLUSTER_ID is not set") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -960,19 +922,20 @@ fn deploy_a_non_working_environment_with_no_failover_on_aws_eks() { ); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let ea = EnvironmentAction::Environment(environment.clone()); - let ea_delete = EnvironmentAction::Environment(delete_env.clone()); + let ea = environment.clone(); + let ea_delete = delete_env.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()); + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, 
TransactionResult::UnrecoverableError(_, _))); - let ret = delete_env.delete_environment(Kind::Aws, &context_for_delete, &ea_delete, logger); + let ret = delete_env.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -1001,7 +964,9 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { .expect("AWS_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = aws_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = aws_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -1014,17 +979,18 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let ret = environment.deploy_environment(Kind::Aws, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); + // let time for nginx to reload the config + thread::sleep(Duration::from_secs(10)); // checking if cookie is properly set on the app - assert!(routers_sessions_are_sticky(environment.routers.clone())); + assert!(routers_sessions_are_sticky(environment.routers)); - let ret = - environment_for_delete.delete_environment(Kind::Aws, &context_for_delete, &env_action_for_delete, logger); + let ret = environment_for_delete.delete_environment(&env_action_for_delete, 
logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); test_name.to_string() diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index f5bd8a4c..53f790b8 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -1,13 +1,11 @@ extern crate test_utilities; use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets, -}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; -use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; +use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; @@ -15,8 +13,6 @@ use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; #[cfg(feature = "test-aws-infra")] fn create_and_destroy_eks_cluster( region: String, - zones: Vec, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -25,6 +21,7 @@ fn create_and_destroy_eks_cluster( ) { engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let zones = region.get_zones(); cluster_test( test_name, Kind::Aws, @@ -35,11 +32,10 @@ fn create_and_destroy_eks_cluster( logger(), region.to_aws_format().as_str(), Some(zones), - secrets, test_type, major_boot_version, minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, Option::from(vpc_network_mode), None, ) @@ -55,13 +51,9 @@ fn 
create_and_destroy_eks_cluster( #[named] #[test] fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -74,13 +66,9 @@ fn create_and_destroy_eks_cluster_without_nat_gw_in_eu_west_3() { #[named] #[test] fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(&region).expect("Wasn't able to convert the desired region"); + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -93,13 +81,9 @@ fn create_and_destroy_eks_cluster_with_nat_gw_in_eu_west_3() { #[named] #[test] fn create_and_destroy_eks_cluster_in_us_east_2() { - let secrets = FuncTestsSecrets::new(); let region = "us-east-2".to_string(); - let aws_region = AwsRegion::from_str(&region).expect("Wasn't able to convert the desired region"); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, @@ -108,20 +92,30 @@ fn create_and_destroy_eks_cluster_in_us_east_2() { ); } +#[cfg(feature = "test-aws-infra")] +#[named] +#[test] +fn create_pause_and_destroy_eks_cluster_in_us_east_2() { + let region = "us-east-2".to_string(); + create_and_destroy_eks_cluster( + region, + ClusterTestType::WithPause, + 
AWS_KUBERNETES_MAJOR_VERSION, + AWS_KUBERNETES_MINOR_VERSION, + WithoutNatGateways, + function_name!(), + ); +} + // only enable this test manually when we want to perform and validate upgrade process #[cfg(feature = "test-aws-infra")] #[named] #[test] #[ignore] fn create_upgrade_and_destroy_eks_cluster_in_eu_west_3() { - let secrets = FuncTestsSecrets::new(); - let region = secrets.AWS_DEFAULT_REGION.clone().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(&region).expect("Wasn't able to convert the desired region"); - + let region = "eu-west-3".to_string(); create_and_destroy_eks_cluster( region, - AwsRegion::get_zones(&aws_region), - secrets, ClusterTestType::WithUpgrade, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, diff --git a/tests/aws/aws_s3.rs b/tests/aws/aws_s3.rs index bc25292a..33ac7aa7 100644 --- a/tests/aws/aws_s3.rs +++ b/tests/aws/aws_s3.rs @@ -12,17 +12,17 @@ fn test_delete_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, aws_region.clone(), 
.unwrap_or_else(|_| panic!("error while creating S3 bucket in `{}`", aws_region.to_aws_format())); // compute: let result = aws_os.delete_bucket(bucket_name.as_str()); // validate: - assert!( - result.is_ok(), - "Delete bucket failed in `{}`", - aws_region.to_aws_format() - ); + assert!(result.is_ok(), "Delete bucket failed in `{}`", aws_region.to_aws_format()); assert!( !aws_os.bucket_exists(bucket_name.as_str()), "Delete bucket failed in `{}`, bucket still exists", @@ -59,17 +55,17 @@ fn test_create_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, aws_region.clone(), @@ -83,11 +79,7 @@ fn test_create_bucket() { let result = aws_os.create_bucket(bucket_name.as_str()); // validate: - assert!( - result.is_ok(), - "Create bucket failed in `{}`", - aws_region.to_aws_format() - ); + assert!(result.is_ok(), "Create bucket failed in `{}`", aws_region.to_aws_format()); assert!( aws_os.bucket_exists(bucket_name.as_str()), "Create bucket failed in `{}`, bucket doesn't exist", @@ -95,13 +87,9 @@ fn test_create_bucket() { ); // clean-up: - aws_os.delete_bucket(bucket_name.as_str()).unwrap_or_else(|_| { - panic!( - "error deleting S3 bucket `{}` in `{}`", - bucket_name, - 
aws_region.to_aws_format() - ) - }); + aws_os + .delete_bucket(bucket_name.as_str()) + .unwrap_or_else(|_| panic!("error deleting S3 bucket `{}` in `{}`", bucket_name, aws_region.to_aws_format())); } #[cfg(feature = "test-aws-infra")] @@ -111,20 +99,20 @@ fn test_recreate_bucket() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); @@ -156,20 +144,20 @@ fn test_put_file() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( 
context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); @@ -207,20 +195,20 @@ fn test_get_file() { let context = context("fake_orga_id", "fake_cluster_id"); let secrets = FuncTestsSecrets::new(); let id = generate_id(); - let name = format!("test-{}", id.to_string()); + let name = format!("test-{}", id); let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) - .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + .unwrap_or_else(|_| panic!("AWS region `{}` seems not to be valid", aws_region_raw)); let aws_os = S3::new( context.clone(), - id.to_string(), - name.to_string(), + id, + name, aws_access_key, aws_secret_key, - aws_region.clone(), + aws_region, false, context.resource_expiration_in_seconds(), ); diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index ca9a8b90..9dbf76d3 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -2,7 +2,6 @@ use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::WithNatGateways; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; use std::str::FromStr; use test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; @@ -15,7 +14,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { let secrets = FuncTestsSecrets::new(); let region = 
secrets.AWS_DEFAULT_REGION.as_ref().expect("AWS region was not found"); - let aws_region = AwsRegion::from_str(®ion).expect("Wasn't able to convert the desired region"); + let aws_region = AwsRegion::from_str(region).expect("Wasn't able to convert the desired region"); let aws_zones = aws_region.get_zones(); let organization_id = generate_id(); @@ -33,7 +32,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment; engine_run_test(|| { cluster_test( @@ -41,13 +40,12 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { Kind::Aws, context.clone(), logger(), - ®ion, + region, Some(aws_zones), - secrets.clone(), ClusterTestType::Classic, AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION, - ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), Some(WithNatGateways), Some(&env_action), ) diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index ca035c44..48826a55 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -2,18 +2,16 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; -use test_utilities::common::{database_test_environment, 
test_db, working_minimal_environment, Infrastructure}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; +use test_utilities::common::{database_test_environment, test_db, Infrastructure}; use test_utilities::digitalocean::{ - clean_environments, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, + clean_environments, do_default_engine_config, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, DO_SELF_HOSTED_DATABASE_DISK_TYPE, DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE, DO_TEST_REGION, }; @@ -47,7 +45,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -62,13 +62,13 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries @@ -76,7 
+76,7 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -103,7 +103,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -118,13 +120,13 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action.clone(), logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = environment.pause_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.pause_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -133,21 +135,21 @@ fn deploy_an_environment_with_db_and_pause_it() { context.clone(), ProviderKind::Do, environment.clone(), - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); - let ret = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let ret = 
environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments(&context, vec![environment], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -175,7 +177,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -199,10 +203,10 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_deletion = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); // TODO: should be uncommented as soon as cert-manager is fixed @@ -212,21 +216,17 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { assert_eq!(con, true); }*/ - let ret = - environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_for_deletion, logger); + let ret = environment_delete.delete_environment(&env_action_for_deletion, logger, 
&engine_config_for_deletion); assert!(matches!(ret, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments( - &context, - vec![environment, environment_delete], - secrets.clone(), - DO_TEST_REGION, - ) { + if let Err(e) = + clean_environments(&context, vec![environment, environment_delete], secrets.clone(), DO_TEST_REGION) + { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -254,8 +254,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_redeploy = context.clone_not_same_execution_id(); + let engine_config_for_redeploy = do_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -329,26 +332,25 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); let environment_check = environment.clone(); - let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let env_action_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, 
logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = environment_to_redeploy.deploy_environment( - Kind::Do, - &context_for_redeploy, &env_action_redeploy, logger.clone(), + &engine_config_for_redeploy, ); assert!(matches!(ret, TransactionResult::Ok)); @@ -365,7 +367,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let ret = environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let ret = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!( ret, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -376,7 +378,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -459,6 +461,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-do-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -523,15 +526,14 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mongodb_v4_0_deploy_a_working_dev_environment() { @@ -539,15 +541,14 @@ fn private_mongodb_v4_0_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } #[cfg(feature 
= "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mongodb_v4_2_deploy_a_working_dev_environment() { @@ -557,6 +558,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-do-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -621,15 +623,14 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_mysql_v8_deploy_a_working_dev_environment() { @@ -691,15 +692,14 @@ fn private_redis_v5_deploy_a_working_dev_environment() { } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } #[cfg(feature = "test-do-self-hosted")] -#[ignore] #[named] #[test] fn private_redis_v6_deploy_a_working_dev_environment() { diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index d2b04f5d..756422a6 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -6,22 +6,69 @@ use self::test_utilities::utilities::{ engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; -use qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use 
qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; +use std::thread; +use std::time::Duration; use test_utilities::common::Infrastructure; -use test_utilities::digitalocean::container_registry_digital_ocean; -use test_utilities::utilities::{build_platform_local_docker, context}; +use test_utilities::digitalocean::do_default_engine_config; +use test_utilities::utilities::context; use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on DigitalOcean infrastructure. // This cluster should be live in order to have those tests passing properly. -#[cfg(feature = "test-do-self-hosted")] +#[cfg(feature = "test-do-minimal")] +#[named] +#[test] +fn digitalocean_test_build_phase() { + let test_name = function_name!(); + engine_run_test(|| { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + + let secrets = FuncTestsSecrets::new(); + let logger = logger(); + let context = context( + secrets + .DIGITAL_OCEAN_TEST_ORGANIZATION_ID + .as_ref() + .expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"), + secrets + .DIGITAL_OCEAN_TEST_CLUSTER_ID + .as_ref() + .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), + ); + let engine_config = do_default_engine_config(&context, logger.clone()); + let environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .as_ref() + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + let env_action = environment.clone(); + + let (env, ret) = environment.build_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check the the image exist in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} + +#[cfg(feature = "test-do-minimal")] #[named] 
#[test] fn digitalocean_doks_deploy_a_working_environment_with_no_router() { @@ -44,7 +91,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -59,17 +108,16 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - let ret = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -77,95 +125,6 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() { }) } -#[cfg(feature = "test-do-self-hosted")] -#[named] -#[test] -fn test_build_cache() { - let test_name = function_name!(); - 
engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .DIGITAL_OCEAN_TEST_ORGANIZATION_ID - .as_ref() - .expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"), - secrets - .DIGITAL_OCEAN_TEST_CLUSTER_ID - .as_ref() - .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), - ); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let docr = container_registry_digital_ocean(&context); - let local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match docr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match docr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match docr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - 
Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match docr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); - }) -} - #[cfg(feature = "test-do-self-hosted")] #[named] #[test] @@ -189,7 +148,9 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -204,16 +165,20 @@ fn digitalocean_doks_deploy_a_not_working_environment_with_no_router() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); - assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let ret = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); - assert!(matches!(ret, TransactionResult::UnrecoverableError(_, _))); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - if let Err(e) = clean_environments(&context, 
vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + let result = + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); + assert!(matches!( + result, + TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) + )); + + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -244,7 +209,9 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -254,10 +221,10 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { .as_str(), ); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); - let ret = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -270,7 +237,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let ret = environment.pause_environment(Kind::Do, &context_for_delete, &env_action, logger.clone()); + let ret = environment.pause_environment(&env_action, logger.clone(), &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -278,7 +245,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { 
context.clone(), Kind::Do, environment.clone(), - selector.clone().as_str(), + selector.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); @@ -286,7 +253,8 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let ret = environment.deploy_environment(Kind::Do, &ctx_resume, &env_action, logger.clone()); + let engine_config_resume = do_default_engine_config(&ctx_resume, logger.clone()); + let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(ret, TransactionResult::Ok)); let ret = get_pods( @@ -300,10 +268,10 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let ret = environment.delete_environment(Kind::Do, &context_for_delete, &env_action, logger); + let ret = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -334,7 +302,9 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -361,22 +331,22 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = 
environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -407,7 +377,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -420,17 +392,16 @@ fn digitalocean_doks_deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = 
environment_delete.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -461,8 +432,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); - + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -487,15 +459,15 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, 
TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -506,10 +478,10 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -540,8 +512,11 @@ fn digitalocean_doks_redeploy_same_app() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = do_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = do_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -567,7 +542,7 @@ fn digitalocean_doks_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -575,11 +550,11 @@ fn digitalocean_doks_redeploy_same_app() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); 
+ let env_action_redeploy = environment_redeploy.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Do, environment.clone(), secrets.clone()) { @@ -595,12 +570,11 @@ fn digitalocean_doks_redeploy_same_app() { context.clone(), Kind::Do, environment_check1, - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); - let result = - environment_redeploy.deploy_environment(Kind::Do, &context_bis, &env_action_redeploy, logger.clone()); + let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -614,10 +588,10 @@ fn digitalocean_doks_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -648,8 +622,11 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = 
do_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -672,30 +649,28 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; // environment actions - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_not_working = environment_for_not_working.clone(); + let env_action_delete = environment_for_delete.clone(); let result = environment_for_not_working.deploy_environment( - Kind::Do, - &context_for_not_working, &env_action_not_working, logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, 
vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -729,6 +704,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -740,6 +716,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { // not working 1 let context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = do_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -751,32 +728,33 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = do_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_not_working_1 = not_working_env_1.clone(); + let env_action_not_working_2 = not_working_env_2.clone(); + let env_action_delete = 
delete_env.clone(); // OK - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback let result = not_working_env_1.deploy_environment( - Kind::Do, - &context_for_not_working_1, &env_action_not_working_1, logger.clone(), + &engine_config_for_not_working_1, ); assert!(matches!( result, @@ -785,10 +763,9 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { // FAIL and Rollback again let result = not_working_env_2.deploy_environment( - Kind::Do, - &context_for_not_working_2, &env_action_not_working_2, logger.clone(), + &engine_config_for_not_working_2, ); assert!(matches!( result, @@ -796,13 +773,13 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -833,6 +810,7 @@ fn digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { .as_ref() .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -843,19 +821,20 @@ fn 
digitalocean_doks_deploy_a_non_working_environment_with_no_failover() { ); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_delete = delete_env.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment(Kind::Do, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } @@ -888,7 +867,9 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = do_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = do_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -901,20 +882,22 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); 
environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); + // let time for nginx to reload the config + thread::sleep(Duration::from_secs(10)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); let result = - environment_for_delete.delete_environment(Kind::Do, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), DO_TEST_REGION) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, DO_TEST_REGION) { warn!("cannot clean environments, error: {:?}", e); } diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index 35102116..653e2ea2 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -2,18 +2,15 @@ extern crate test_utilities; use self::test_utilities::common::ClusterDomain; use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets, -}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; -use 
qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterTestType}; #[cfg(feature = "test-do-infra")] fn create_and_destroy_doks_cluster( region: DoRegion, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -27,11 +24,10 @@ fn create_and_destroy_doks_cluster( logger(), region.as_str(), None, - secrets, test_type, major_boot_version, minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, None, None, ) @@ -43,10 +39,8 @@ fn create_and_destroy_doks_cluster( #[test] fn create_and_destroy_doks_cluster_ams_3() { let region = DoRegion::Amsterdam3; - let secrets = FuncTestsSecrets::new(); create_and_destroy_doks_cluster( region, - secrets, ClusterTestType::Classic, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, @@ -60,10 +54,8 @@ fn create_and_destroy_doks_cluster_ams_3() { #[ignore] fn create_upgrade_and_destroy_doks_cluster_in_nyc_3() { let region = DoRegion::NewYorkCity3; - let secrets = FuncTestsSecrets::new(); create_and_destroy_doks_cluster( region, - secrets, ClusterTestType::WithUpgrade, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index e5bc231a..3e6b9e8d 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -1,4 +1,4 @@ -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; +use qovery_engine::models::digital_ocean::DoRegion; use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use qovery_engine::object_storage::ObjectStorage; use tempfile::NamedTempFile; @@ -18,8 +18,8 @@ fn test_delete_bucket_hard_delete_strategy() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - 
secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -49,8 +49,8 @@ fn test_delete_bucket_empty_strategy() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::Empty, ); @@ -85,8 +85,8 @@ fn test_create_bucket() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -116,8 +116,8 @@ fn test_recreate_bucket() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -154,8 +154,8 @@ fn test_put_file() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -178,10 +178,7 @@ fn test_put_file() { // validate: assert!(result.is_ok()); - assert_eq!( - true, - spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok() - ); + assert_eq!(true, spaces.get(bucket_name.as_str(), object_key.as_str(), 
false).is_ok()); // clean-up: spaces @@ -200,8 +197,8 @@ fn test_get_file() { context, "test-fake".to_string(), "test-fake".to_string(), - secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap().to_string(), - secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap().to_string(), + secrets.DIGITAL_OCEAN_SPACES_ACCESS_ID.unwrap(), + secrets.DIGITAL_OCEAN_SPACES_SECRET_ID.unwrap(), TEST_REGION, BucketDeleteStrategy::HardDelete, ); @@ -226,10 +223,7 @@ fn test_get_file() { // validate: assert!(result.is_ok()); - assert_eq!( - true, - spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok() - ); + assert_eq!(true, spaces.get(bucket_name.as_str(), object_key.as_str(), false).is_ok()); // clean-up: spaces diff --git a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs index 934ef4f8..c1aaf385 100644 --- a/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs +++ b/tests/digitalocean/do_utility_kubernetes_doks_test_cluster.rs @@ -2,10 +2,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; -use qovery_engine::cloud_provider::digitalocean::DO; +use test_utilities::digitalocean::do_default_engine_config; use tracing::{span, Level}; -use self::test_utilities::common::Cluster; use qovery_engine::transaction::{Transaction, TransactionResult}; // Warning: This test shouldn't be ran by CI @@ -36,7 +35,7 @@ fn create_digitalocean_kubernetes_doks_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = DO::docker_cr_engine(&context, logger.clone()); + let engine = do_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy @@ -78,7 +77,7 @@ fn destroy_digitalocean_kubernetes_doks_test_cluster() { let logger = logger(); let context 
= context(organization_id.as_str(), cluster_id.as_str()); - let engine = DO::docker_cr_engine(&context, logger.clone()); + let engine = do_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Destroy diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index f1d3985f..2851c16f 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -1,7 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -29,7 +28,7 @@ fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment; engine_run_test(|| { cluster_test( @@ -39,11 +38,10 @@ fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { logger, region.as_str(), None, - secrets.clone(), ClusterTestType::Classic, DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION, - ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), None, Some(&env_action), ) diff --git a/tests/docker/multi_stage_simple/Dockerfile b/tests/docker/multi_stage_simple/Dockerfile new file mode 100644 index 00000000..10d224cb --- /dev/null +++ b/tests/docker/multi_stage_simple/Dockerfile @@ -0,0 +1,10 @@ + +FROM golang:1.16 AS build + +COPY 
hello.go /go/src/project/hello.go +WORKDIR /go/src/project +RUN go build hello.go + +FROM scratch +COPY --from=build /go/src/project/hello /bin/hello +ENTRYPOINT ["/bin/hello"] diff --git a/tests/docker/multi_stage_simple/Dockerfile.buildkit b/tests/docker/multi_stage_simple/Dockerfile.buildkit new file mode 100644 index 00000000..5219fdf1 --- /dev/null +++ b/tests/docker/multi_stage_simple/Dockerfile.buildkit @@ -0,0 +1,10 @@ +FROM golang:1.16 AS build + +# ../ is not valid if using old docker engine, only allowed with buildkit +COPY ../hello.go /go/src/project/hello.go +WORKDIR /go/src/project +RUN go build hello.go + +FROM scratch +COPY --from=build /go/src/project/hello /bin/hello +ENTRYPOINT ["/bin/hello"] diff --git a/tests/docker/multi_stage_simple/hello.go b/tests/docker/multi_stage_simple/hello.go new file mode 100644 index 00000000..a932edea --- /dev/null +++ b/tests/docker/multi_stage_simple/hello.go @@ -0,0 +1,7 @@ +package main + +import "fmt" + +func main() { + fmt.Println("hello world") +} diff --git a/tests/lib.rs b/tests/lib.rs index 1d73348f..bbc13eb3 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -1,6 +1,6 @@ #[macro_use] extern crate maplit; + mod aws; mod digitalocean; mod scaleway; -mod unit; diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index a0c80992..8ea49c8a 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,9 +1,10 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; -use qovery_engine::build_platform::Image; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; +use qovery_engine::io_models::NoOpProgressListener; +use qovery_engine::models::scaleway::ScwZone; +use std::sync::Arc; use tracing::debug; use uuid::Uuid; @@ -45,19 +46,11 @@ fn test_get_registry_namespace() { scw_secret_key.as_str(), 
scw_default_project_id.as_str(), region, - ); - - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap(); + let image = registry_name.to_string(); container_registry .create_registry_namespace(&image) .expect("error while creating registry namespace"); @@ -73,10 +66,7 @@ fn test_get_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); // clean-up: container_registry.delete_registry_namespace(&image).unwrap(); @@ -103,18 +93,11 @@ fn test_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - ); + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap(); - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; + let image = registry_name.to_string(); // execute: debug!("test_create_registry_namespace - {}", region); @@ -154,19 +137,11 @@ fn test_delete_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - ); - - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, 
- }; + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap(); + let image = registry_name.to_string(); container_registry .create_registry_namespace(&image) .expect("error while creating registry namespace"); @@ -200,19 +175,11 @@ fn test_get_or_create_registry_namespace() { scw_secret_key.as_str(), scw_default_project_id.as_str(), region, - ); - - let image = Image { - application_id: "1234".to_string(), - name: registry_name.to_string(), - tag: "tag123".to_string(), - commit_id: "commit_id".to_string(), - registry_name: Some(registry_name.to_string()), - registry_secret: None, - registry_url: None, - registry_docker_json_config: None, - }; + Arc::new(Box::new(NoOpProgressListener {})), + ) + .unwrap(); + let image = registry_name.to_string(); container_registry .create_registry_namespace(&image) .expect("error while creating registry namespace"); @@ -230,10 +197,7 @@ fn test_get_or_create_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); let added_registry_result = container_registry.get_registry_namespace(&image); assert_eq!(true, added_registry_result.is_some()); @@ -251,10 +215,7 @@ fn test_get_or_create_registry_namespace() { assert_eq!(true, result.status.is_some()); let status = result.status.unwrap(); - assert_eq!( - scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, - status, - ); + assert_eq!(scaleway_api_rs::models::scaleway_registry_v1_namespace::Status::Ready, status,); let added_registry_result = container_registry.get_registry_namespace(&image); assert_eq!(true, added_registry_result.is_some()); diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 1fdc5b5b..128e8917 100644 --- a/tests/scaleway/scw_databases.rs +++ 
b/tests/scaleway/scw_databases.rs @@ -2,20 +2,18 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{ - Action, Clone2, Context, Database, DatabaseKind, DatabaseMode, Environment, EnvironmentAction, Port, Protocol, -}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, generate_password, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; +use test_utilities::common::test_db; use test_utilities::common::{database_test_environment, Infrastructure}; -use test_utilities::common::{test_db, working_minimal_environment}; use test_utilities::scaleway::{ - clean_environments, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, + clean_environments, scw_default_engine_config, SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_SELF_HOSTED_DATABASE_DISK_TYPE, SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE, SCW_TEST_ZONE, }; @@ -51,7 +49,9 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environment_3_apps_3_routers_3_databases( &context, secrets @@ -66,14 +66,13 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = 
EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries @@ -81,7 +80,7 @@ fn deploy_an_environment_with_3_databases_and_3_apps() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -110,7 +109,9 @@ fn deploy_an_environment_with_db_and_pause_it() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let environment = test_utilities::common::environnement_2_app_2_routers_1_psql( &context, secrets @@ -125,13 +126,13 @@ fn deploy_an_environment_with_db_and_pause_it() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = 
environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = environment.pause_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.pause_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this db @@ -140,22 +141,22 @@ fn deploy_an_environment_with_db_and_pause_it() { context.clone(), ProviderKind::Scw, environment.clone(), - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), true); let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger.clone()); + environment_delete.delete_environment(&env_action_delete, logger.clone(), &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments(&context, vec![environment], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -185,7 +186,9 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let test_domain = secrets .DEFAULT_TEST_DOMAIN .as_ref() @@ -208,27 +211,24 @@ fn postgresql_deploy_a_working_development_environment_with_all_options() { environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); 
- let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_deletion = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_for_deletion, logger); + environment_delete.delete_environment(&env_action_for_deletion, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); // delete images created during test from registries - if let Err(e) = clean_environments( - &context, - vec![environment, environment_delete], - secrets.clone(), - SCW_TEST_ZONE, - ) { + if let Err(e) = + clean_environments(&context, vec![environment, environment_delete], secrets.clone(), SCW_TEST_ZONE) + { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -258,8 +258,11 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_redeploy = context.clone_not_same_execution_id(); + let engine_config_for_redeploy = scw_default_engine_config(&context_for_redeploy, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -334,26 +337,25 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); let 
environment_check = environment.clone(); - let env_action_redeploy = EnvironmentAction::Environment(environment_to_redeploy.clone()); + let env_action_redeploy = environment_to_redeploy.clone(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = environment_to_redeploy.deploy_environment( - Kind::Scw, - &context_for_redeploy, &env_action_redeploy, logger.clone(), + &engine_config_for_redeploy, ); assert!(matches!(result, TransactionResult::Ok)); @@ -370,7 +372,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { (false, _) => assert!(false), } - let result = environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) @@ -381,7 +383,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { warn!("cannot clean environments, error: {:?}", e); } - return test_name.to_string(); + test_name.to_string() }) } @@ -434,6 +436,7 @@ fn private_postgresql_v10_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v10_deploy_a_working_dev_environment() { test_postgresql_configuration("10", function_name!(), CONTAINER, true); } @@ -448,6 +451,7 @@ fn private_postgresql_v11_deploy_a_working_dev_environment() { 
#[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v11_deploy_a_working_dev_environment() { test_postgresql_configuration("11", function_name!(), CONTAINER, true); } @@ -462,6 +466,7 @@ fn private_postgresql_v12_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_postgresql_v12_deploy_a_working_dev_environment() { test_postgresql_configuration("12", function_name!(), CONTAINER, true); } @@ -594,6 +599,7 @@ fn private_mongodb_v3_6_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v3_6_deploy_a_working_dev_environment() { test_mongodb_configuration("3.6", function_name!(), CONTAINER, true); } @@ -608,6 +614,7 @@ fn private_mongodb_v4_0_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_0_deploy_a_working_dev_environment() { test_mongodb_configuration("4.0", function_name!(), CONTAINER, true); } @@ -622,6 +629,7 @@ fn private_mongodb_v4_2_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mongodb_v4_2_deploy_a_working_dev_environment() { test_mongodb_configuration("4.2", function_name!(), CONTAINER, true); } @@ -689,6 +697,7 @@ fn private_mysql_v5_7_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_mysql_v5_7_deploy_a_working_dev_environment() { test_mysql_configuration("5.7", function_name!(), CONTAINER, true); } @@ -773,6 +782,7 @@ fn private_redis_v5_deploy_a_working_dev_environment() { #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] +#[ignore] fn public_redis_v5_deploy_a_working_dev_environment() { test_redis_configuration("5", function_name!(), CONTAINER, true); } diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 870c70f4..2fb6cdf1 100644 
--- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -6,22 +6,70 @@ use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; use ::function_name::named; -use qovery_engine::build_platform::{BuildPlatform, CacheResult}; use qovery_engine::cloud_provider::Kind; -use qovery_engine::container_registry::{ContainerRegistry, PullResult}; -use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; -use std::time::SystemTime; +use std::thread; +use std::time::Duration; use test_utilities::common::Infrastructure; -use test_utilities::scaleway::container_registry_scw; -use test_utilities::utilities::build_platform_local_docker; +use test_utilities::scaleway::scw_default_engine_config; use tracing::{span, warn, Level}; // Note: All those tests relies on a test cluster running on Scaleway infrastructure. // This cluster should be live in order to have those tests passing properly. 
-#[cfg(feature = "test-scw-self-hosted")] +#[cfg(feature = "test-scw-minimal")] +#[named] +#[test] +fn scaleway_test_build_phase() { + let test_name = function_name!(); + engine_run_test(|| { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + + let logger = logger(); + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .SCALEWAY_TEST_ORGANIZATION_ID + .as_ref() + .expect("SCALEWAY_TEST_ORGANIZATION_ID") + .as_str(), + secrets + .SCALEWAY_TEST_CLUSTER_ID + .as_ref() + .expect("SCALEWAY_TEST_CLUSTER_ID") + .as_str(), + ); + let engine_config = scw_default_engine_config(&context, logger.clone()); + let environment = test_utilities::common::working_minimal_environment( + &context, + secrets + .DEFAULT_TEST_DOMAIN + .as_ref() + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str(), + ); + + let env_action = environment.clone(); + + let (env, ret) = environment.build_environment(&env_action, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + // Check that the image exists in the registry + let img_exist = engine_config + .container_registry() + .does_image_exists(&env.applications[0].get_build().image); + assert!(img_exist); + + test_name.to_string() + }) +} + +#[cfg(feature = "test-scw-minimal")] #[named] #[test] fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { @@ -46,7 +94,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets
environment_for_delete.routers = vec![]; environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -79,97 +129,6 @@ fn scaleway_kapsule_deploy_a_working_environment_with_no_router() { }) } -#[cfg(feature = "test-scw-self-hosted")] -#[named] -#[test] -fn test_build_cache() { - let test_name = function_name!(); - engine_run_test(|| { - init(); - let span = span!(Level::INFO, "test", name = test_name); - let _enter = span.enter(); - - let secrets = FuncTestsSecrets::new(); - let context = context( - secrets - .SCALEWAY_TEST_ORGANIZATION_ID - .as_ref() - .expect("SCALEWAY_TEST_ORGANIZATION_ID") - .as_str(), - secrets - .SCALEWAY_TEST_CLUSTER_ID - .as_ref() - .expect("SCALEWAY_TEST_CLUSTER_ID") - .as_str(), - ); - - let environment = test_utilities::common::working_minimal_environment( - &context, - secrets - .DEFAULT_TEST_DOMAIN - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str(), - ); - - let scr = container_registry_scw(&context); - let 
local_docker = build_platform_local_docker(&context, logger()); - let app = environment.applications.first().unwrap(); - let image = app.to_image(); - - let app_build = app.to_build(); - let _ = match local_docker.has_cache(&app_build) { - Ok(CacheResult::Hit) => assert!(false), - Ok(CacheResult::Miss(_)) => assert!(true), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let _ = match scr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(false), - PullResult::None => assert!(true), - }; - - let cancel_task = || false; - let build_result = local_docker.build(app.to_build(), false, &cancel_task).unwrap(); - - let _ = match scr.push(&build_result.build.image, false) { - Ok(_) => assert!(true), - Err(_) => assert!(false), - }; - - // TODO clean local docker cache - - let start_pull_time = SystemTime::now(); - let _ = match scr.pull(&build_result.build.image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap(); - - let _ = match local_docker.has_cache(&build_result.build) { - Ok(CacheResult::Hit) => assert!(true), - Ok(CacheResult::Miss(_)) => assert!(false), - Ok(CacheResult::MissWithoutParentBuild) => assert!(false), - Err(_) => assert!(false), - }; - - let start_pull_time = SystemTime::now(); - let _ = match scr.pull(&image).unwrap() { - PullResult::Some(_) => assert!(true), - PullResult::None => assert!(false), - }; - - let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap(); - - if pull_duration_2.as_millis() > pull_duration.as_millis() { - assert!(false); - } - - return test_name.to_string(); - }) -} - #[cfg(feature = "test-scw-self-hosted")] #[named] #[test] @@ -195,7 +154,9 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, 
logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::non_working_environment( &context, @@ -210,20 +171,20 @@ fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!( result, TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) )); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -256,7 +217,9 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = 
test_utilities::common::working_minimal_environment( &context, secrets @@ -266,10 +229,10 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { .as_str(), ); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -282,7 +245,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment(Kind::Scw, &context_for_delete, &env_action, logger.clone()); + let result = environment.pause_environment(&env_action, logger.clone(), &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -298,7 +261,8 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let result = environment.deploy_environment(Kind::Scw, &ctx_resume, &env_action, logger.clone()); + let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -312,10 +276,10 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = environment.delete_environment(Kind::Scw, &context_for_delete, &env_action, logger); + let result = environment.delete_environment(&env_action, logger, &engine_config_for_delete); assert!(matches!(result, 
TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -348,7 +312,9 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -375,22 +341,22 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, 
SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -423,7 +389,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -436,17 +404,16 @@ fn scaleway_kapsule_deploy_a_working_environment_with_domain() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + let result = environment_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -479,7 +446,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_deletion = 
context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, @@ -505,15 +474,15 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -524,11 +493,10 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { Err(_) => assert!(false), }; - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -560,7 +528,9 @@ fn deploy_a_working_environment_and_pause_it() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = 
scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -570,10 +540,10 @@ fn deploy_a_working_environment_and_pause_it() { .as_str(), ); - let ea = EnvironmentAction::Environment(environment.clone()); + let ea = environment.clone(); let selector = format!("appId={}", environment.applications[0].id); - let result = environment.deploy_environment(Kind::Scw, &context, &ea, logger.clone()); + let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); let ret = get_pods( @@ -586,7 +556,7 @@ fn deploy_a_working_environment_and_pause_it() { assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); - let result = environment.pause_environment(Kind::Scw, &context_for_delete, &ea, logger.clone()); + let result = environment.pause_environment(&ea, logger.clone(), &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); // Check that we have actually 0 pods running for this app @@ -602,23 +572,18 @@ fn deploy_a_working_environment_and_pause_it() { // Check we can resume the env let ctx_resume = context.clone_not_same_execution_id(); - let result = environment.deploy_environment(Kind::Scw, &ctx_resume, &ea, logger.clone()); + let engine_config_resume = scw_default_engine_config(&ctx_resume, logger.clone()); + let result = environment.deploy_environment(&ea, logger.clone(), &engine_config_resume); assert!(matches!(result, TransactionResult::Ok)); - let ret = get_pods( - context.clone(), - Kind::Scw, - environment.clone(), - selector.as_str(), - secrets.clone(), - ); + let ret = get_pods(context, Kind::Scw, environment.clone(), selector.as_str(), secrets); assert_eq!(ret.is_ok(), true); assert_eq!(ret.unwrap().items.is_empty(), false); // Cleanup - let result = environment.delete_environment(Kind::Scw, &context_for_delete, &ea, logger); + let result = 
environment.delete_environment(&ea, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() }) } @@ -647,13 +612,15 @@ fn scaleway_kapsule_redeploy_same_app() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_bis = context.clone_not_same_execution_id(); + let engine_config_bis = scw_default_engine_config(&context_bis, logger.clone()); let context_for_deletion = context.clone_not_same_execution_id(); + let engine_config_for_deletion = scw_default_engine_config(&context_for_deletion, logger.clone()); let mut environment = test_utilities::common::working_minimal_environment( &context, secrets - .clone() .DEFAULT_TEST_DOMAIN .as_ref() .expect("DEFAULT_TEST_DOMAIN is not set in secrets") @@ -675,7 +642,7 @@ fn scaleway_kapsule_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -683,11 +650,11 @@ fn scaleway_kapsule_redeploy_same_app() { let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_delete.clone()); + let env_action = environment.clone(); + let env_action_redeploy = environment_redeploy.clone(); + let env_action_delete = environment_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); match get_pvc(context.clone(), Kind::Scw, environment.clone(), secrets.clone()) { @@ -703,12 +670,11 @@ fn 
scaleway_kapsule_redeploy_same_app() { context.clone(), Kind::Scw, environment_check1, - app_name.clone().as_str(), + app_name.as_str(), secrets.clone(), ); - let result = - environment_redeploy.deploy_environment(Kind::Scw, &context_bis, &env_action_redeploy, logger.clone()); + let result = environment_redeploy.deploy_environment(&env_action_redeploy, logger.clone(), &engine_config_bis); assert!(matches!(result, TransactionResult::Ok)); let (_, number2) = is_pod_restarted_env( @@ -722,11 +688,10 @@ fn scaleway_kapsule_redeploy_same_app() { // nothing changed in the app, so, it shouldn't be restarted assert!(number.eq(&number2)); - let result = - environment_delete.delete_environment(Kind::Scw, &context_for_deletion, &env_action_delete, logger); + let result = environment_delete.delete_environment(&env_action_delete, logger, &engine_config_for_deletion); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -759,8 +724,11 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_not_working = context.clone_not_same_execution_id(); + let engine_config_for_not_working = scw_default_engine_config(&context_for_not_working, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); // env part generation let environment = test_utilities::common::working_minimal_environment( @@ -783,32 +751,30 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen app.environment_vars = BTreeMap::default(); app }) - .collect::>(); 
+ .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; // environment actions - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working.clone()); - let env_action_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_not_working = environment_for_not_working.clone(); + let env_action_delete = environment_for_delete.clone(); let result = environment_for_not_working.deploy_environment( - Kind::Scw, - &context_for_not_working, &env_action_not_working, logger.clone(), + &engine_config_for_not_working, ); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = environment_for_delete.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -845,6 +811,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::working_minimal_environment( &context, secrets @@ -856,6 +823,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { // not working 1 let 
context_for_not_working_1 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_1 = scw_default_engine_config(&context_for_not_working_1, logger.clone()); let mut not_working_env_1 = environment.clone(); not_working_env_1.applications = not_working_env_1 .applications @@ -867,32 +835,33 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); + let engine_config_for_not_working_2 = scw_default_engine_config(&context_for_not_working_2, logger.clone()); let not_working_env_2 = not_working_env_1.clone(); // work for delete let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1.clone()); - let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_not_working_1 = not_working_env_1.clone(); + let env_action_not_working_2 = not_working_env_2.clone(); + let env_action_delete = delete_env.clone(); // OK - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); // FAIL and rollback let result = not_working_env_1.deploy_environment( - Kind::Scw, - &context_for_not_working_1, &env_action_not_working_1, logger.clone(), + &engine_config_for_not_working_1, ); assert!(matches!( result, @@ -901,10 +870,9 @@ fn 
scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { // FAIL and Rollback again let result = not_working_env_2.deploy_environment( - Kind::Scw, - &context_for_not_working_2, &env_action_not_working_2, logger.clone(), + &engine_config_for_not_working_2, ); assert!(matches!( result, @@ -912,13 +880,13 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { )); // Should be working - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); - let result = delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -952,6 +920,7 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { .expect("SCALEWAY_TEST_CLUSTER_ID") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let environment = test_utilities::common::non_working_environment( &context, secrets @@ -962,19 +931,20 @@ fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() { ); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let mut delete_env = environment.clone(); delete_env.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_delete = EnvironmentAction::Environment(delete_env.clone()); + let env_action = environment.clone(); + let env_action_delete = 
delete_env.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::UnrecoverableError(_, _))); - let result = delete_env.delete_environment(Kind::Scw, &context_for_delete, &env_action_delete, logger); + let result = delete_env.delete_environment(&env_action_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } @@ -1007,7 +977,9 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { .expect("SCALEWAY_TEST_CLUSTER_ID is not set in secrets") .as_str(), ); + let engine_config = scw_default_engine_config(&context, logger.clone()); let context_for_delete = context.clone_not_same_execution_id(); + let engine_config_for_delete = scw_default_engine_config(&context_for_delete, logger.clone()); let environment = test_utilities::common::environment_only_http_server_router_with_sticky_session( &context, secrets @@ -1020,20 +992,22 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() { let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; - let env_action = EnvironmentAction::Environment(environment.clone()); - let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete.clone()); + let env_action = environment.clone(); + let env_action_for_delete = environment_for_delete.clone(); - let result = environment.deploy_environment(Kind::Scw, &context, &env_action, logger.clone()); + let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, 
TransactionResult::Ok)); + // let time for nginx to reload the config + thread::sleep(Duration::from_secs(10)); // checking cookie is properly set on the app assert!(routers_sessions_are_sticky(environment.routers.clone())); let result = - environment_for_delete.delete_environment(Kind::Scw, &context_for_delete, &env_action_for_delete, logger); + environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete); assert!(matches!(result, TransactionResult::Ok)); - if let Err(e) = clean_environments(&context, vec![environment.clone()], secrets.clone(), SCW_TEST_ZONE) { + if let Err(e) = clean_environments(&context, vec![environment], secrets, SCW_TEST_ZONE) { warn!("cannot clean environments, error: {:?}", e); } diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 35a01b98..952cc24d 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -1,19 +1,16 @@ extern crate test_utilities; use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{ - context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets, -}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; #[cfg(feature = "test-scw-infra")] fn create_and_destroy_kapsule_cluster( zone: ScwZone, - secrets: FuncTestsSecrets, test_type: ClusterTestType, major_boot_version: u8, minor_boot_version: u8, @@ -28,11 +25,10 @@ fn create_and_destroy_kapsule_cluster( logger(), zone.as_str(), None, - secrets, test_type, major_boot_version, 
minor_boot_version, - ClusterDomain::Default, + &ClusterDomain::Default, vpc_network_mode, None, ) @@ -45,10 +41,8 @@ fn create_and_destroy_kapsule_cluster( #[test] fn create_and_destroy_kapsule_cluster_par_1() { let zone = ScwZone::Paris1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -62,10 +56,8 @@ fn create_and_destroy_kapsule_cluster_par_1() { #[test] fn create_and_destroy_kapsule_cluster_par_2() { let zone = ScwZone::Paris2; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -79,10 +71,8 @@ fn create_and_destroy_kapsule_cluster_par_2() { #[test] fn create_pause_and_destroy_kapsule_cluster_ams_1() { let zone = ScwZone::Amsterdam1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithPause, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -96,10 +86,8 @@ fn create_pause_and_destroy_kapsule_cluster_ams_1() { #[test] fn create_and_destroy_kapsule_cluster_war_1() { let zone = ScwZone::Warsaw1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -115,10 +103,8 @@ fn create_and_destroy_kapsule_cluster_war_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_par_1() { let zone = ScwZone::Paris1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -134,10 +120,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_par_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_par_2() { let zone = ScwZone::Paris2; - let secrets = 
FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -153,10 +137,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_par_2() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_ams_1() { let zone = ScwZone::Amsterdam1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, @@ -172,10 +154,8 @@ fn create_upgrade_and_destroy_kapsule_cluster_in_ams_1() { #[ignore] fn create_upgrade_and_destroy_kapsule_cluster_in_war_1() { let zone = ScwZone::Warsaw1; - let secrets = FuncTestsSecrets::new(); create_and_destroy_kapsule_cluster( zone, - secrets, ClusterTestType::WithUpgrade, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, diff --git a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs index 0e1bbb46..70816f19 100644 --- a/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs +++ b/tests/scaleway/scw_utility_kubernetes_kapsule_test_cluster.rs @@ -2,10 +2,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, engine_run_test, init, logger, FuncTestsSecrets}; use ::function_name::named; +use test_utilities::scaleway::scw_default_engine_config; use tracing::{span, Level}; -use self::test_utilities::common::Cluster; -use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::transaction::{Transaction, TransactionResult}; // Warning: This test shouldn't be ran by CI @@ -36,7 +35,7 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = Scaleway::docker_cr_engine(&context, logger.clone()); + let engine = scw_default_engine_config(&context, logger.clone()); let mut tx = 
Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy @@ -78,7 +77,7 @@ fn destroy_scaleway_kubernetes_kapsule_test_cluster() { let logger = logger(); let context = context(organization_id.as_str(), cluster_id.as_str()); - let engine = Scaleway::docker_cr_engine(&context, logger.clone()); + let engine = scw_default_engine_config(&context, logger.clone()); let mut tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Destroy diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index da76f998..bef7262d 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,7 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::EnvironmentAction; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -27,7 +26,7 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { ); let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = EnvironmentAction::Environment(environment.clone()); + let env_action = environment; engine_run_test(|| { cluster_test( @@ -37,11 +36,10 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { logger, zone.as_str(), None, - secrets.clone(), ClusterTestType::Classic, SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION, - ClusterDomain::Custom(cluster_domain), + &ClusterDomain::Custom(cluster_domain), None, Some(&env_action), ) diff --git a/tests/unit/mod.rs b/tests/unit/mod.rs deleted file mode 100644 index 
8b137891..00000000 --- a/tests/unit/mod.rs +++ /dev/null @@ -1 +0,0 @@ -