From 68fcbc73921b632061d374ee90285f290a6f1c1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Mon, 28 Mar 2022 16:56:18 +0200 Subject: [PATCH 001/122] Refacto application to avoid duplication (#669) --- src/build_platform/local_docker.rs | 4 +- src/build_platform/mod.rs | 2 +- src/cloud_provider/aws/databases/mongodb.rs | 4 +- src/cloud_provider/aws/databases/mysql.rs | 4 +- .../aws/databases/postgresql.rs | 4 +- src/cloud_provider/aws/databases/redis.rs | 4 +- src/cloud_provider/aws/databases/utilities.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 4 +- src/cloud_provider/aws/mod.rs | 3 +- src/cloud_provider/aws/regions.rs | 2 +- src/cloud_provider/aws/router.rs | 2 +- .../digitalocean/application.rs | 504 --------------- .../digitalocean/databases/mongodb.rs | 4 +- .../digitalocean/databases/mysql.rs | 4 +- .../digitalocean/databases/postgresql.rs | 4 +- .../digitalocean/databases/redis.rs | 4 +- .../digitalocean/kubernetes/mod.rs | 6 +- src/cloud_provider/digitalocean/mod.rs | 3 +- .../digitalocean/network/vpc.rs | 4 +- src/cloud_provider/digitalocean/router.rs | 2 +- src/cloud_provider/environment.rs | 7 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/mod.rs | 2 +- src/cloud_provider/scaleway/application.rs | 599 ------------------ .../scaleway/databases/mongodb.rs | 4 +- .../scaleway/databases/mysql.rs | 4 +- .../scaleway/databases/postgresql.rs | 4 +- .../scaleway/databases/redis.rs | 4 +- .../scaleway/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 6 +- src/cloud_provider/scaleway/mod.rs | 3 +- src/cloud_provider/scaleway/router.rs | 2 +- src/cloud_provider/service.rs | 15 +- src/cloud_provider/utilities.rs | 2 +- src/container_registry/docr.rs | 2 +- src/container_registry/ecr.rs | 4 +- src/container_registry/mod.rs | 2 +- .../scaleway_container_registry.rs | 5 +- src/dns_provider/cloudflare.rs | 2 +- src/dns_provider/mod.rs | 2 +- src/engine.rs | 2 +- src/errors/mod.rs | 
2 +- src/events/mod.rs | 2 +- src/{models.rs => io_models.rs} | 157 ++--- src/lib.rs | 1 + src/logger.rs | 4 +- .../aws => models}/application.rs | 528 ++++++++------- src/models/aws/application.rs | 90 +++ src/models/aws/mod.rs | 43 ++ src/models/digital_ocean/application.rs | 91 +++ src/models/digital_ocean/mod.rs | 126 ++++ src/models/mod.rs | 5 + src/models/scaleway/application.rs | 103 +++ src/models/scaleway/mod.rs | 207 ++++++ src/models/types.rs | 19 + src/object_storage/mod.rs | 2 +- src/object_storage/s3.rs | 2 +- src/object_storage/scaleway_object_storage.rs | 4 +- src/object_storage/spaces.rs | 4 +- src/transaction.rs | 9 +- test_utilities/src/aws.rs | 2 +- test_utilities/src/cloudflare.rs | 2 +- test_utilities/src/common.rs | 10 +- test_utilities/src/digitalocean.rs | 4 +- test_utilities/src/scaleway.rs | 4 +- test_utilities/src/utilities.rs | 8 +- tests/aws/aws_databases.rs | 6 +- tests/aws/aws_environment.rs | 14 +- tests/digitalocean/do_databases.rs | 6 +- tests/digitalocean/do_environment.rs | 12 +- tests/digitalocean/do_kubernetes.rs | 2 +- tests/digitalocean/do_spaces.rs | 2 +- tests/digitalocean/do_whole_enchilada.rs | 2 +- tests/scaleway/scw_container_registry.rs | 4 +- tests/scaleway/scw_databases.rs | 6 +- tests/scaleway/scw_environment.rs | 12 +- tests/scaleway/scw_kubernetes.rs | 2 +- tests/scaleway/scw_whole_enchilada.rs | 2 +- 78 files changed, 1191 insertions(+), 1561 deletions(-) delete mode 100644 src/cloud_provider/digitalocean/application.rs delete mode 100644 src/cloud_provider/scaleway/application.rs rename src/{models.rs => io_models.rs} (92%) rename src/{cloud_provider/aws => models}/application.rs (54%) create mode 100644 src/models/aws/application.rs create mode 100644 src/models/aws/mod.rs create mode 100644 src/models/digital_ocean/application.rs create mode 100644 src/models/digital_ocean/mod.rs create mode 100644 src/models/mod.rs create mode 100644 src/models/scaleway/application.rs create mode 100644 
src/models/scaleway/mod.rs create mode 100644 src/models/types.rs diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 48490651..9728a235 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -17,10 +17,10 @@ use crate::cmd::docker::{ContainerImage, Docker, DockerError}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; const BUILD_DURATION_TIMEOUT_SEC: u64 = 30 * 60; diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 5804afa6..55a57eb0 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -5,8 +5,8 @@ use crate::cmd::command::CommandError; use crate::cmd::docker::DockerError; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; +use crate::io_models::{Context, Listen, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::{Context, Listen, QoveryIdentifier}; use crate::utilities::compute_image_tag; use std::fmt::{Display, Formatter, Result as FmtResult}; use std::hash::Hash; diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 230bc3d8..386a0b88 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -16,9 +16,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use 
crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDbAws { diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs index d645aa6d..9dd5622f 100644 --- a/src/cloud_provider/aws/databases/mysql.rs +++ b/src/cloud_provider/aws/databases/mysql.rs @@ -17,9 +17,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, DatabaseKind, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, DatabaseKind, Listen, Listener, Listeners}; use ::function_name::named; pub struct MySQLAws { diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs index 07ec3678..0e881b2c 100644 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ b/src/cloud_provider/aws/databases/postgresql.rs @@ -17,9 +17,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct PostgreSQLAws { diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs index 9c90501b..3c7424ba 100644 --- a/src/cloud_provider/aws/databases/redis.rs +++ b/src/cloud_provider/aws/databases/redis.rs @@ -14,9 +14,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, 
EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisAws { diff --git a/src/cloud_provider/aws/databases/utilities.rs b/src/cloud_provider/aws/databases/utilities.rs index 4b3b15e9..af52a341 100644 --- a/src/cloud_provider/aws/databases/utilities.rs +++ b/src/cloud_provider/aws/databases/utilities.rs @@ -1,6 +1,6 @@ use crate::cloud_provider::utilities::VersionsNumber; use crate::errors::CommandError; -use crate::models::DatabaseKind; +use crate::io_models::DatabaseKind; pub fn get_parameter_group_from_version( version: VersionsNumber, @@ -28,7 +28,7 @@ pub fn aws_final_snapshot_name(database_name: &str) -> String { mod tests_aws_databases_parameters { use crate::cloud_provider::aws::databases::utilities::get_parameter_group_from_version; use crate::cloud_provider::utilities::VersionsNumber; - use crate::models::DatabaseKind; + use crate::io_models::DatabaseKind; use std::str::FromStr; #[test] diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index c79927fb..09e93e87 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -38,11 +38,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString, }; +use crate::logger::Logger; use crate::object_storage::s3::S3; use crate::object_storage::ObjectStorage; use 
crate::string::terraform_list_format; diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index 3323de5e..7dba78d6 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -9,10 +9,9 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY}; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; use crate::runtime::block_on; -pub mod application; pub mod databases; pub mod kubernetes; pub mod regions; diff --git a/src/cloud_provider/aws/regions.rs b/src/cloud_provider/aws/regions.rs index 3ede17b0..5a719c07 100644 --- a/src/cloud_provider/aws/regions.rs +++ b/src/cloud_provider/aws/regions.rs @@ -1,6 +1,6 @@ use crate::cloud_provider::aws::regions::AwsZones::*; use crate::cloud_provider::aws::regions::RegionAndZoneErrors::*; -use crate::models::ToTerraformString; +use crate::io_models::ToTerraformString; use serde::{Deserialize, Serialize}; use std::fmt; use std::fmt::{Display, Formatter}; diff --git a/src/cloud_provider/aws/router.rs b/src/cloud_provider/aws/router.rs index 940cee44..769180f4 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/cloud_provider/aws/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::{to_engine_error, Timeout}; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterAws { diff --git a/src/cloud_provider/digitalocean/application.rs b/src/cloud_provider/digitalocean/application.rs deleted file mode 
100644 index c6d2784b..00000000 --- a/src/cloud_provider/digitalocean/application.rs +++ /dev/null @@ -1,504 +0,0 @@ -use tera::Context as TeraContext; - -use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; -use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; -use std::fmt; -use std::str::FromStr; - -pub struct ApplicationDo { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl ApplicationDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - ApplicationDo { - context, - id: 
id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - build, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl Helm for ApplicationDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name, self.id), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/digitalocean/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for ApplicationDo { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl Application for ApplicationDo { - fn get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl ToTransmitter for ApplicationDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for ApplicationDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn 
start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - - // This is specific to digital ocean as it is them that create the registry secret - // we don't have the hand on it - 
context.insert("registry_secret", "do-container-registry-secret-for-cluster"); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::Standard => "do-block-storage", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, 
crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for ApplicationDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for ApplicationDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), 
- self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } -} - -impl Listen for ApplicationDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - Standard, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum DoRegion { - NewYorkCity1, - NewYorkCity2, - NewYorkCity3, - Amsterdam2, - Amsterdam3, - SanFrancisco1, - SanFrancisco2, - SanFrancisco3, - Singapore, - London, - Frankfurt, - Toronto, - Bangalore, -} - -impl DoRegion { - pub fn as_str(&self) -> &str { - match self { - DoRegion::NewYorkCity1 => "nyc1", - DoRegion::NewYorkCity2 => "nyc2", - DoRegion::NewYorkCity3 => "nyc3", - DoRegion::Amsterdam2 => "ams2", - DoRegion::Amsterdam3 => "ams3", - DoRegion::SanFrancisco1 => "sfo1", - DoRegion::SanFrancisco2 => "sfo2", - DoRegion::SanFrancisco3 => "sfo3", - DoRegion::Singapore => "sgp1", - DoRegion::London => "lon1", - DoRegion::Frankfurt => "fra1", - DoRegion::Toronto => "tor1", - DoRegion::Bangalore => "blr1", - } - } -} - -impl fmt::Display for DoRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - DoRegion::NewYorkCity1 => write!(f, "nyc1"), - DoRegion::NewYorkCity2 => write!(f, "nyc2"), - DoRegion::NewYorkCity3 => write!(f, "nyc3"), - DoRegion::Amsterdam2 => write!(f, "ams2"), - DoRegion::Amsterdam3 => write!(f, "ams3"), - DoRegion::SanFrancisco1 => write!(f, "sfo1"), - DoRegion::SanFrancisco2 => write!(f, "sfo2"), - DoRegion::SanFrancisco3 => write!(f, "sfo3"), - DoRegion::Singapore => write!(f, "sgp1"), - DoRegion::London => write!(f, "lon1"), - DoRegion::Frankfurt => write!(f, "fra1"), - DoRegion::Toronto => write!(f, "tor1"), - DoRegion::Bangalore => write!(f, "blr1"), - } - } -} - -impl FromStr for DoRegion { - type Err = 
CommandError; - - fn from_str(s: &str) -> Result { - match s { - "nyc1" => Ok(DoRegion::NewYorkCity1), - "nyc2" => Ok(DoRegion::NewYorkCity2), - "nyc3" => Ok(DoRegion::NewYorkCity3), - "ams2" => Ok(DoRegion::Amsterdam2), - "ams3" => Ok(DoRegion::Amsterdam3), - "sfo1" => Ok(DoRegion::SanFrancisco1), - "sfo2" => Ok(DoRegion::SanFrancisco2), - "sfo3" => Ok(DoRegion::SanFrancisco3), - "sgp1" => Ok(DoRegion::Singapore), - "lon1" => Ok(DoRegion::London), - "fra1" => Ok(DoRegion::Frankfurt), - "tor1" => Ok(DoRegion::Toronto), - "blr1" => Ok(DoRegion::Bangalore), - _ => { - return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); - } - } - } -} diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs index 61c68859..1c179c2f 100644 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ b/src/cloud_provider/digitalocean/databases/mongodb.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDo { diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs index b6d19aad..5bffb434 100644 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ b/src/cloud_provider/digitalocean/databases/mysql.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; 
use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MySQLDo { diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs index db1837d1..2b47a106 100644 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ b/src/cloud_provider/digitalocean/databases/postgresql.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct PostgresDo { diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs index 0ae77e6f..a06684d9 100644 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ b/src/cloud_provider/digitalocean/databases/redis.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisDo { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 55fa89e2..8d58135d 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -6,7 +6,6 @@ use serde::{Deserialize, 
Serialize}; use tera::Context as TeraContext; use crate::cloud_provider::aws::regions::AwsZones; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::kubernetes::doks_api::{ get_do_kubeconfig_by_cluster_name, get_do_latest_doks_slug_from_api, get_doks_info_from_name, @@ -40,11 +39,12 @@ use crate::events::Stage::Infrastructure; use crate::events::{ EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, }; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, ToHelmString, }; +use crate::logger::Logger; +use crate::models::digital_ocean::DoRegion; use crate::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index 50ca7dc2..debccdf3 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -9,9 +9,8 @@ use crate::cloud_provider::{CloudProvider, Kind, TerraformStateCredentials}; use crate::constants::DIGITAL_OCEAN_TOKEN; use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod do_api_common; pub mod kubernetes; diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 563b9e26..76225984 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ 
b/src/cloud_provider/digitalocean/network/vpc.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; -use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::models::vpc::{Vpc, Vpcs}; use crate::errors::CommandError; +use crate::models::digital_ocean::DoRegion; #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] #[serde(rename_all = "snake_case")] @@ -169,11 +169,11 @@ fn is_do_reserved_vpc_subnets(region: DoRegion, subnet: &str) -> bool { #[cfg(test)] mod tests_do_vpcs { - use crate::cloud_provider::digitalocean::application::DoRegion; use crate::cloud_provider::digitalocean::network::vpc::{ do_get_vpcs_from_api_output, get_do_vpc_from_name, get_do_vpc_from_subnet, get_random_available_subnet, is_do_reserved_vpc_subnets, VpcInitKind, }; + use crate::models::digital_ocean::DoRegion; fn do_get_vpc_json() -> String { // https://developers.digitalocean.com/documentation/v2/#retrieve-an-existing-load-balancer diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs index cd9662cc..1bc93804 100644 --- a/src/cloud_provider/digitalocean/router.rs +++ b/src/cloud_provider/digitalocean/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterDo { diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 624532a7..916e8759 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,4 +1,5 @@ -use crate::cloud_provider::service::{Action, Application, Database, Router, StatefulService, 
StatelessService}; +use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; +use crate::models::application::IApplication; use crate::unit_conversion::cpu_string_to_float; pub struct Environment { @@ -8,7 +9,7 @@ pub struct Environment { pub owner_id: String, pub organization_id: String, pub action: Action, - pub applications: Vec>, + pub applications: Vec>, pub routers: Vec>, pub databases: Vec>, } @@ -20,7 +21,7 @@ impl Environment { owner_id: &str, organization_id: &str, action: Action, - applications: Vec>, + applications: Vec>, routers: Vec>, databases: Vec>, ) -> Self { diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 86d2f166..e1e90163 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -31,11 +31,11 @@ use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter}; use crate::fs::workspace_directory; -use crate::logger::Logger; -use crate::models::ProgressLevel::Info; -use crate::models::{ +use crate::io_models::ProgressLevel::Info; +use crate::io_models::{ Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, }; +use crate::logger::Logger; use crate::object_storage::ObjectStorage; use crate::unit_conversion::{any_to_mi, cpu_string_to_float}; @@ -1434,8 +1434,8 @@ mod tests { use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd::structs::{KubernetesList, KubernetesNode, KubernetesVersion}; use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter}; + use crate::io_models::{ListenersHelper, QoveryIdentifier}; use crate::logger::StdIoLogger; - use crate::models::{ListenersHelper, QoveryIdentifier}; #[test] pub fn check_kubernetes_upgrade_method() { diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs 
index eb9a412a..650b1d09 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -7,7 +7,7 @@ use crate::cloud_provider::environment::Environment; use crate::cloud_provider::kubernetes::Kubernetes; use crate::errors::EngineError; use crate::events::{EventDetails, Stage, ToTransmitter}; -use crate::models::{Context, Listen}; +use crate::io_models::{Context, Listen}; pub mod aws; pub mod digitalocean; diff --git a/src/cloud_provider/scaleway/application.rs b/src/cloud_provider/scaleway/application.rs deleted file mode 100644 index 52a87c0f..00000000 --- a/src/cloud_provider/scaleway/application.rs +++ /dev/null @@ -1,599 +0,0 @@ -use std::fmt; -use std::str::FromStr; - -use tera::Context as TeraContext; - -use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; -use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; -use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; - -pub struct ApplicationScw { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - 
start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, -} - -impl ApplicationScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, - ) -> Self { - ApplicationScw { - context, - id: id.to_string(), - action, - name: name.to_string(), - ports, - total_cpus, - cpu_burst, - total_ram_in_mib, - min_instances, - max_instances, - start_timeout_in_seconds, - build, - storage, - environment_variables, - listeners, - logger, - } - } - - fn is_stateful(&self) -> bool { - !self.storage.is_empty() - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "application" - } -} - -impl Helm for ApplicationScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/scaleway/charts/q-application", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - String::new() - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl StatelessService for ApplicationScw { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl Application for ApplicationScw { - fn get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl ToTransmitter for ApplicationScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id().to_string(), self.name().to_string()) - } -} - -impl Service for ApplicationScw { - fn context(&self) -> 
&Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - context.insert("registry_secret_name", 
&format!("registry-token-{}", &self.id)); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - // TODO(benjaminch): Switch to proper storage class - // Note: Seems volume storage type are not supported, only blocked storage for the time being - // https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass - StorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", - StorageType::LocalSsd => "l_ssd", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - // container registry credentials - context.insert( - "container_registry_docker_json_config", - self.build - .image - .clone() - .registry_docker_json_config - .unwrap_or_default() - .as_str(), - ); - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationScw { 
- #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for ApplicationScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl 
Delete for ApplicationScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, self, event_details.clone()) - }) - } -} - -impl Listen for ApplicationScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] -pub enum StorageType { - #[serde(rename = "b_ssd")] - BlockSsd, - #[serde(rename = "l_ssd")] - LocalSsd, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwRegion { - Paris, - Amsterdam, - Warsaw, -} - -impl ScwRegion { - // TODO(benjaminch): improve / refactor this! 
- pub fn as_str(&self) -> &str { - match self { - ScwRegion::Paris => "fr-par", - ScwRegion::Amsterdam => "nl-ams", - ScwRegion::Warsaw => "pl-waw", - } - } -} - -impl fmt::Display for ScwRegion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwRegion::Paris => write!(f, "fr-par"), - ScwRegion::Amsterdam => write!(f, "nl-ams"), - ScwRegion::Warsaw => write!(f, "pl-waw"), - } - } -} - -impl FromStr for ScwRegion { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "fr-par" => Ok(ScwRegion::Paris), - "nl-ams" => Ok(ScwRegion::Amsterdam), - "pl-waw" => Ok(ScwRegion::Warsaw), - _ => Err(()), - } - } -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub enum ScwZone { - Paris1, - Paris2, - Paris3, - Amsterdam1, - Warsaw1, -} - -impl ScwZone { - // TODO(benjaminch): improve / refactor this! - pub fn as_str(&self) -> &str { - match self { - ScwZone::Paris1 => "fr-par-1", - ScwZone::Paris2 => "fr-par-2", - ScwZone::Paris3 => "fr-par-3", - ScwZone::Amsterdam1 => "nl-ams-1", - ScwZone::Warsaw1 => "pl-waw-1", - } - } - - pub fn region(&self) -> ScwRegion { - match self { - ScwZone::Paris1 => ScwRegion::Paris, - ScwZone::Paris2 => ScwRegion::Paris, - ScwZone::Paris3 => ScwRegion::Paris, - ScwZone::Amsterdam1 => ScwRegion::Amsterdam, - ScwZone::Warsaw1 => ScwRegion::Warsaw, - } - } - - // TODO(benjaminch): improve / refactor this! 
- pub fn region_str(&self) -> String { - match self { - ScwZone::Paris1 => "fr-par", - ScwZone::Paris2 => "fr-par", - ScwZone::Paris3 => "fr-par", - ScwZone::Amsterdam1 => "nl-ams", - ScwZone::Warsaw1 => "pl-waw", - } - .to_string() - } -} - -impl fmt::Display for ScwZone { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ScwZone::Paris1 => write!(f, "fr-par-1"), - ScwZone::Paris2 => write!(f, "fr-par-2"), - ScwZone::Paris3 => write!(f, "fr-par-3"), - ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), - ScwZone::Warsaw1 => write!(f, "pl-waw-1"), - } - } -} - -impl FromStr for ScwZone { - type Err = CommandError; - - fn from_str(s: &str) -> Result { - match s { - "fr-par-1" => Ok(ScwZone::Paris1), - "fr-par-2" => Ok(ScwZone::Paris2), - "fr-par-3" => Ok(ScwZone::Paris3), - "nl-ams-1" => Ok(ScwZone::Amsterdam1), - "pl-waw-1" => Ok(ScwZone::Warsaw1), - _ => { - return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{ScwRegion, ScwZone}; - use std::str::FromStr; - - #[test] - fn test_region_to_str() { - assert_eq!("fr-par", ScwRegion::Paris.as_str()); - assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); - assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); - } - - #[test] - fn test_region_from_str() { - assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); - assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); - assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); - } - - #[test] - fn test_zone_to_str() { - assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); - assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); - assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); - assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); - assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); - } - - #[test] - fn test_zone_from_str() { - assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); - assert_eq!(ScwZone::from_str("fr-par-2"), 
Ok(ScwZone::Paris2)); - assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); - assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); - assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); - } - - #[test] - fn test_zone_region() { - assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); - assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); - assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); - } -} diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs index 160094a0..f1b39561 100644 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ b/src/cloud_provider/scaleway/databases/mongodb.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct MongoDbScw { diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs index f6ce64a1..6d33eb8f 100644 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ b/src/cloud_provider/scaleway/databases/mysql.rs @@ -13,9 +13,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, 
Listeners}; use ::function_name::named; use std::collections::HashMap; diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs index d64cbf27..d101ecbc 100644 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ b/src/cloud_provider/scaleway/databases/postgresql.rs @@ -13,9 +13,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::{CommandError, EngineError}; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; use std::collections::HashMap; diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs index 6c7bc8a8..528152da 100644 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ b/src/cloud_provider/scaleway/databases/redis.rs @@ -11,9 +11,9 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::DatabaseMode::MANAGED; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::DatabaseMode::MANAGED; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RedisScw { diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 089322c6..0e5e6469 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -3,9 +3,9 @@ use crate::cloud_provider::helm::{ CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, 
ShellAgentContext, }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; -use crate::cloud_provider::scaleway::application::{ScwRegion, ScwZone}; use crate::cloud_provider::scaleway::kubernetes::KapsuleOptions; use crate::errors::CommandError; +use crate::models::scaleway::{ScwRegion, ScwZone}; use semver::Version; use serde::{Deserialize, Serialize}; use std::fs::File; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 960c0bce..4b3e8d78 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -10,7 +10,6 @@ use crate::cloud_provider::kubernetes::{ }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::scaleway::application::ScwZone; use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites}; use crate::cloud_provider::scaleway::kubernetes::node::{ScwInstancesType, ScwNodeGroup}; use crate::cloud_provider::utilities::print_action; @@ -23,10 +22,11 @@ use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, }; +use crate::logger::Logger; +use crate::models::scaleway::ScwZone; use crate::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; diff --git a/src/cloud_provider/scaleway/mod.rs b/src/cloud_provider/scaleway/mod.rs index fa03ff8c..ceaf8c3c 100644 --- 
a/src/cloud_provider/scaleway/mod.rs +++ b/src/cloud_provider/scaleway/mod.rs @@ -4,9 +4,8 @@ use uuid::Uuid; use crate::cloud_provider::{CloudProvider, EngineError, Kind, TerraformStateCredentials}; use crate::constants::{SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY}; use crate::events::{EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; +use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod application; pub mod databases; pub mod kubernetes; pub mod router; diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs index ee6a6e40..f7c3cb95 100644 --- a/src/cloud_provider/scaleway/router.rs +++ b/src/cloud_provider/scaleway/router.rs @@ -12,8 +12,8 @@ use crate::cmd::helm; use crate::cmd::helm::Timeout; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners}; use ::function_name::named; pub struct RouterScw { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 216e69e1..b3111f51 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -5,7 +5,6 @@ use std::sync::mpsc::TryRecvError; use std::thread; use std::time::Duration; -use crate::build_platform::Build; use tera::Context as TeraContext; use crate::cloud_provider::environment::Environment; @@ -21,12 +20,12 @@ use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replica use crate::cmd::structs::LabelsContent; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; -use crate::logger::Logger; -use crate::models::ProgressLevel::Info; -use crate::models::{ +use 
crate::io_models::ProgressLevel::Info; +use crate::io_models::{ Context, DatabaseMode, Listen, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; +use crate::logger::Logger; pub trait Service: ToTransmitter { fn context(&self) -> &Context; @@ -157,10 +156,6 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait Application: StatelessService { - fn get_build(&self) -> &Build; - fn get_build_mut(&mut self) -> &mut Build; -} pub trait Router: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; @@ -348,8 +343,8 @@ pub fn default_tera_context( context.insert("max_instances", &service.max_instances()); context.insert("is_private_port", &service.private_port().is_some()); - if service.private_port().is_some() { - context.insert("private_port", &service.private_port().unwrap()); + if let Some(private_port) = service.private_port() { + context.insert("private_port", &private_port); } context.insert("version", &service.version()); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 28a20894..4c51ac0c 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -4,8 +4,8 @@ use std::collections::HashMap; use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EventDetails, EventMessage}; +use crate::io_models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use crate::logger::Logger; -use crate::models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use chrono::Duration; use core::option::Option::{None, Some}; use core::result::Result; diff --git a/src/container_registry/docr.rs b/src/container_registry/docr.rs index f0134373..a90c1e73 100644 --- a/src/container_registry/docr.rs +++ b/src/container_registry/docr.rs @@ -7,7 +7,7 @@ use crate::build_platform::Image; use crate::cmd::command::QoveryCommand; use 
crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::utilities; use url::Url; diff --git a/src/container_registry/ecr.rs b/src/container_registry/ecr.rs index ec9ef8ae..83363bdb 100644 --- a/src/container_registry/ecr.rs +++ b/src/container_registry/ecr.rs @@ -14,10 +14,10 @@ use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; use crate::events::{EngineEvent, EventMessage, GeneralStep, Stage}; -use crate::logger::Logger; -use crate::models::{ +use crate::io_models::{ Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, }; +use crate::logger::Logger; use crate::runtime::block_on; use retry::delay::Fixed; use retry::Error::Operation; diff --git a/src/container_registry/mod.rs b/src/container_registry/mod.rs index 7f1bd271..28f10a5b 100644 --- a/src/container_registry/mod.rs +++ b/src/container_registry/mod.rs @@ -5,7 +5,7 @@ use crate::build_platform::Image; use crate::container_registry::errors::ContainerRegistryError; use crate::errors::EngineError; use crate::events::{EventDetails, Stage, Transmitter}; -use crate::models::{Context, Listen, QoveryIdentifier}; +use crate::io_models::{Context, Listen, QoveryIdentifier}; pub mod docr; pub mod ecr; diff --git a/src/container_registry/scaleway_container_registry.rs b/src/container_registry/scaleway_container_registry.rs index 7fab0e68..3382b07b 100644 --- a/src/container_registry/scaleway_container_registry.rs +++ b/src/container_registry/scaleway_container_registry.rs @@ -1,13 +1,12 @@ extern crate scaleway_api_rs; -use crate::cloud_provider::scaleway::application::ScwZone; - use 
self::scaleway_api_rs::models::scaleway_registry_v1_namespace::Status; use crate::build_platform::Image; use crate::cmd::docker; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::{ContainerRegistry, ContainerRegistryInfo, Kind}; -use crate::models::{Context, Listen, Listener, Listeners}; +use crate::io_models::{Context, Listen, Listener, Listeners}; +use crate::models::scaleway::ScwZone; use crate::runtime::block_on; use url::Url; diff --git a/src/dns_provider/cloudflare.rs b/src/dns_provider/cloudflare.rs index 48e8939c..2ed36465 100644 --- a/src/dns_provider/cloudflare.rs +++ b/src/dns_provider/cloudflare.rs @@ -2,7 +2,7 @@ use std::net::Ipv4Addr; use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::{DnsProvider, Kind}; -use crate::models::{Context, Domain}; +use crate::io_models::{Context, Domain}; pub struct Cloudflare { context: Context, diff --git a/src/dns_provider/mod.rs b/src/dns_provider/mod.rs index ce52fa81..bba32a2d 100644 --- a/src/dns_provider/mod.rs +++ b/src/dns_provider/mod.rs @@ -3,7 +3,7 @@ use std::net::Ipv4Addr; use crate::dns_provider::errors::DnsProviderError; use serde::{Deserialize, Serialize}; -use crate::models::{Context, Domain}; +use crate::io_models::{Context, Domain}; pub mod cloudflare; pub mod errors; diff --git a/src/engine.rs b/src/engine.rs index a4c22bfe..3e296c2e 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -10,7 +10,7 @@ use crate::container_registry::ContainerRegistry; use crate::dns_provider::errors::DnsProviderError; use crate::dns_provider::DnsProvider; use crate::errors::EngineError; -use crate::models::Context; +use crate::io_models::Context; #[derive(Error, Debug, PartialEq)] pub enum EngineConfigError { diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 9cc9c1eb..22184db2 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -10,7 +10,7 @@ use crate::cmd::helm::HelmError; use crate::container_registry::errors::ContainerRegistryError; use 
crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; use crate::object_storage::errors::ObjectStorageError; use std::fmt::{Display, Formatter}; use thiserror::Error; diff --git a/src/events/mod.rs b/src/events/mod.rs index eb5565f8..deb2eb19 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -8,7 +8,7 @@ extern crate url; use crate::cloud_provider::Kind; use crate::errors::{CommandError, EngineError}; -use crate::models::QoveryIdentifier; +use crate::io_models::QoveryIdentifier; use std::fmt::{Display, Formatter}; #[derive(Debug, Clone)] diff --git a/src/models.rs b/src/io_models.rs similarity index 92% rename from src/models.rs rename to src/io_models.rs index e2b0bb5b..ab929c25 100644 --- a/src/models.rs +++ b/src/io_models.rs @@ -14,20 +14,17 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; -use crate::cloud_provider::aws::application::ApplicationAws; use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; use crate::cloud_provider::aws::databases::mysql::MySQLAws; use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; use crate::cloud_provider::aws::databases::redis::RedisAws; use crate::cloud_provider::aws::router::RouterAws; -use crate::cloud_provider::digitalocean::application::ApplicationDo; use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; use crate::cloud_provider::digitalocean::databases::redis::RedisDo; use crate::cloud_provider::digitalocean::router::RouterDo; use crate::cloud_provider::environment::Environment; -use crate::cloud_provider::scaleway::application::ApplicationScw; use 
crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; @@ -40,6 +37,12 @@ use crate::cloud_provider::Kind as CPKind; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; +use crate::models; +use crate::models::application::IApplication; +use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; +use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; +use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; +use crate::models::types::{AWS, DO, SCW}; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -219,65 +222,77 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Option> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new(ApplicationAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), - CPKind::Do => Some(Box::new(ApplicationDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_do_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), - CPKind::Scw => 
Some(Box::new(ApplicationScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - listeners, - logger.clone(), - ))), + CPKind::Aws => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + AwsAppExtraSettings {}, + listeners, + logger.clone(), + ) + .unwrap(), + )), + CPKind::Do => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + DoAppExtraSettings {}, + listeners, + logger.clone(), + ) + .unwrap(), + )), + CPKind::Scw => Some(Box::new( + models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + ScwAppExtraSettings {}, + listeners, + logger.clone(), + ) 
+ .unwrap(), + )), } } @@ -439,17 +454,15 @@ pub enum StorageType { } impl Storage { - pub fn to_aws_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_aws_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), storage_type: match self.storage_type { - StorageType::SlowHdd => crate::cloud_provider::aws::application::StorageType::SC1, - StorageType::Hdd => crate::cloud_provider::aws::application::StorageType::ST1, - StorageType::Ssd => crate::cloud_provider::aws::application::StorageType::GP2, - StorageType::FastSsd => crate::cloud_provider::aws::application::StorageType::IO1, + StorageType::SlowHdd => AwsStorageType::SC1, + StorageType::Hdd => AwsStorageType::ST1, + StorageType::Ssd => AwsStorageType::GP2, + StorageType::FastSsd => AwsStorageType::IO1, }, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), @@ -457,26 +470,22 @@ impl Storage { } } - pub fn to_do_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_do_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::digitalocean::application::StorageType::Standard, + storage_type: DoStorageType::Standard, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: self.snapshot_retention_in_days, } } - pub fn to_scw_storage( - &self, - ) -> crate::cloud_provider::models::Storage { + pub fn to_scw_storage(&self) -> crate::cloud_provider::models::Storage { crate::cloud_provider::models::Storage { id: self.id.clone(), name: self.name.clone(), - storage_type: crate::cloud_provider::scaleway::application::StorageType::BlockSsd, + storage_type: ScwStorageType::BlockSsd, size_in_gib: self.size_in_gib, mount_point: self.mount_point.clone(), snapshot_retention_in_days: 
self.snapshot_retention_in_days, @@ -1314,7 +1323,7 @@ impl ToTerraformString for Ipv4Addr { #[cfg(test)] mod tests { - use crate::models::{Domain, QoveryIdentifier}; + use crate::io_models::{Domain, QoveryIdentifier}; #[test] fn test_domain_new() { diff --git a/src/lib.rs b/src/lib.rs index 2d0cb7b2..00177df1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,6 +19,7 @@ pub mod errors; pub mod events; pub mod fs; pub mod git; +pub mod io_models; pub mod logger; pub mod models; pub mod object_storage; diff --git a/src/logger.rs b/src/logger.rs index 02f62444..4497f4b9 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -72,12 +72,12 @@ impl Logger for StdIoLogger { #[cfg(test)] mod tests { use super::*; - use crate::cloud_provider::scaleway::application::ScwRegion; use crate::cloud_provider::Kind; use crate::errors; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; - use crate::models::QoveryIdentifier; + use crate::io_models::QoveryIdentifier; + use crate::models::scaleway::ScwRegion; use tracing_test::traced_test; use url::Url; use uuid::Uuid; diff --git a/src/cloud_provider/aws/application.rs b/src/models/application.rs similarity index 54% rename from src/cloud_provider/aws/application.rs rename to src/models/application.rs index 49f3230f..b7a01601 100644 --- a/src/cloud_provider/aws/application.rs +++ b/src/models/application.rs @@ -1,45 +1,52 @@ -use tera::Context as TeraContext; - use crate::build_platform::Build; -use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{ - EnvironmentVariable, EnvironmentVariableDataTemplate, Storage, StorageDataTemplate, -}; +use crate::cloud_provider::models::{EnvironmentVariable, Storage}; +use crate::cloud_provider::service::{delete_stateless_service, scale_down_application}; use crate::cloud_provider::service::{ - default_tera_context, delete_stateless_service, 
deploy_stateless_service_error, deploy_user_stateless_service, - scale_down_application, send_progress_on_long_task, Action, Application, Create, Delete, Helm, Pause, Service, - ServiceType, StatelessService, + deploy_stateless_service_error, deploy_user_stateless_service, send_progress_on_long_task, Action, Create, Delete, + Helm, Pause, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::EngineError; -use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter}; +use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port}; -use ::function_name::named; +use crate::models::types::CloudProvider; +use function_name::named; +use std::marker::PhantomData; +use tera::Context as TeraContext; -pub struct ApplicationAws { - context: Context, - id: String, - action: Action, - name: String, - ports: Vec, - total_cpus: String, - cpu_burst: String, - total_ram_in_mib: u32, - min_instances: u32, - max_instances: u32, - start_timeout_in_seconds: u32, - build: Build, - storage: Vec>, - environment_variables: Vec, - listeners: Listeners, - logger: Box, +#[derive(thiserror::Error, Debug)] +pub enum ApplicationError { + #[error("Application invalid configuration: {0}")] + InvalidConfig(String), } -impl ApplicationAws { +pub struct Application { + _marker: PhantomData, + pub(crate) context: Context, + pub(crate) id: String, + pub(crate) action: Action, + pub(crate) name: String, + pub(crate) ports: Vec, + pub(crate) total_cpus: String, + pub(crate) cpu_burst: String, + pub(crate) total_ram_in_mib: u32, + pub(crate) min_instances: u32, + 
pub(crate) max_instances: u32, + pub(crate) start_timeout_in_seconds: u32, + pub(crate) build: Build, + pub(crate) storage: Vec>, + pub(crate) environment_variables: Vec, + pub(crate) listeners: Listeners, + pub(crate) logger: Box, + pub(crate) _extra_settings: T::AppExtraSettings, +} + +// Here we define the common behavior among all providers +impl Application { pub fn new( context: Context, id: &str, @@ -53,12 +60,16 @@ impl ApplicationAws { max_instances: u32, start_timeout_in_seconds: u32, build: Build, - storage: Vec>, + storage: Vec>, environment_variables: Vec, + extra_settings: T::AppExtraSettings, listeners: Listeners, logger: Box, - ) -> Self { - ApplicationAws { + ) -> Result { + // TODO: Check that the information provided are coherent + + Ok(Self { + _marker: PhantomData, context, id: id.to_string(), action, @@ -75,33 +86,220 @@ impl ApplicationAws { environment_variables, listeners, logger, - } + _extra_settings: extra_settings, + }) } - fn is_stateful(&self) -> bool { + pub fn is_stateful(&self) -> bool { !self.storage.is_empty() } - fn cloud_provider_name(&self) -> &str { - "aws" + pub fn context(&self) -> &Context { + &self.context } - fn struct_name(&self) -> &str { - "application" + pub fn service_type(&self) -> ServiceType { + ServiceType::Application + } + + pub fn id(&self) -> &str { + self.id.as_str() + } + + pub fn name(&self) -> &str { + self.name.as_str() + } + + pub fn commit_id(&self) -> String { + self.build.image.commit_id.clone() + } + + pub fn action(&self) -> &Action { + &self.action + } + + pub fn public_port(&self) -> Option { + self.ports + .iter() + .find(|port| port.publicly_accessible) + .map(|port| port.port as u16) + } + + pub fn start_timeout(&self) -> u32 { + (self.start_timeout_in_seconds + 10) * 4 + } + + pub fn total_cpus(&self) -> String { + self.total_cpus.to_string() + } + + pub fn cpu_burst(&self) -> String { + self.cpu_burst.to_string() + } + + pub fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib + } + 
+ pub fn min_instances(&self) -> u32 { + self.min_instances + } + + pub fn max_instances(&self) -> u32 { + self.max_instances + } + + pub fn publicly_accessible(&self) -> bool { + self.public_port().is_some() + } + + pub fn logger(&self) -> &dyn Logger { + &*self.logger + } + + pub fn selector(&self) -> Option { + Some(format!("appId={}", self.id())) + } + + pub fn build(&self) -> &Build { + &self.build + } + + pub fn build_mut(&mut self) -> &mut Build { + &mut self.build + } + + pub fn sanitize_name(&self) -> String { + sanitize_name("app", self.id()) + } + + pub(crate) fn get_event_details(&self, stage: Stage) -> EventDetails { + let context = self.context(); + EventDetails::new( + None, + QoveryIdentifier::from(context.organization_id().to_string()), + QoveryIdentifier::from(context.cluster_id().to_string()), + QoveryIdentifier::from(context.execution_id().to_string()), + None, + stage, + self.to_transmitter(), + ) } } -impl Helm for ApplicationAws { +// Traits implementations +impl ToTransmitter for Application { + fn to_transmitter(&self) -> Transmitter { + Transmitter::Application(self.id.to_string(), self.name.to_string()) + } +} + +impl Listen for Application { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} + +pub(crate) trait ToTeraContext { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result; +} + +impl Service for Application +where + Application: ToTeraContext, +{ + fn context(&self) -> &Context { + self.context() + } + + fn service_type(&self) -> ServiceType { + self.service_type() + } + + fn id(&self) -> &str { + self.id() + } + + fn name(&self) -> &str { + self.name() + } + + fn sanitized_name(&self) -> String { + self.sanitize_name() + } + + fn version(&self) -> String { + self.commit_id() + } + + fn action(&self) -> &Action { + self.action() + } + + fn private_port(&self) -> Option { + self.public_port() + } + + fn 
start_timeout(&self) -> Timeout { + Timeout::Value(self.start_timeout()) + } + + fn total_cpus(&self) -> String { + self.total_cpus() + } + + fn cpu_burst(&self) -> String { + self.cpu_burst() + } + + fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib() + } + + fn min_instances(&self) -> u32 { + self.min_instances() + } + + fn max_instances(&self) -> u32 { + self.max_instances() + } + + fn publicly_accessible(&self) -> bool { + self.publicly_accessible() + } + + fn tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context(target) + } + + fn logger(&self) -> &dyn Logger { + self.logger() + } + + fn selector(&self) -> Option { + self.selector() + } +} + +impl Helm for Application { fn helm_selector(&self) -> Option { self.selector() } fn helm_release_name(&self) -> String { - crate::string::cut(format!("application-{}-{}", self.name(), self.id()), 50) + crate::string::cut(format!("application-{}-{}", self.id(), self.id()), 50) } fn helm_chart_dir(&self) -> String { - format!("{}/aws/charts/q-application", self.context.lib_root_dir()) + format!( + "{}/{}/charts/q-application", + self.context.lib_root_dir(), + T::helm_directory_name(), + ) } fn helm_chart_values_dir(&self) -> String { @@ -113,185 +311,16 @@ impl Helm for ApplicationAws { } } -impl StatelessService for ApplicationAws { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl ToTransmitter for ApplicationAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id.to_string(), self.name.to_string()) - } -} - -impl Application for ApplicationAws { - fn get_build(&self) -> &Build { - &self.build - } - - fn get_build_mut(&mut self) -> &mut Build { - &mut self.build - } -} - -impl Service for ApplicationAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Application - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - 
self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("app", self.name()) - } - - fn version(&self) -> String { - self.build.image.commit_id.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - self.ports - .iter() - .find(|port| port.publicly_accessible) - .map(|port| port.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value((self.start_timeout_in_seconds + 10) * 4) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - self.cpu_burst.to_string() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - self.min_instances - } - - fn max_instances(&self) -> u32 { - self.max_instances - } - - fn publicly_accessible(&self) -> bool { - self.private_port().is_some() - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let mut context = default_tera_context(self, target.kubernetes, target.environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - context.insert("registry_secret", self.build.image.registry_host()); - - let cpu_limits = match validate_k8s_required_cpu_and_burstable( - &ListenersHelper::new(&self.listeners), - self.context.execution_id(), - &self.id, - self.total_cpus(), - self.cpu_burst(), - event_details.clone(), - self.logger(), - ) { - 
Ok(l) => l, - Err(e) => { - return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( - event_details, - self.total_cpus(), - self.cpu_burst(), - e, - )); - } - }; - - context.insert("cpu_burst", &cpu_limits.cpu_limit); - - let storage = self - .storage - .iter() - .map(|s| StorageDataTemplate { - id: s.id.clone(), - name: s.name.clone(), - storage_type: match s.storage_type { - StorageType::SC1 => "sc1", - StorageType::ST1 => "st1", - StorageType::GP2 => "gp2", - StorageType::IO1 => "io1", - } - .to_string(), - size_in_gib: s.size_in_gib, - mount_point: s.mount_point.clone(), - snapshot_retention_in_days: s.snapshot_retention_in_days, - }) - .collect::>(); - - let is_storage = !storage.is_empty(); - - context.insert("storage", &storage); - context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("appId={}", self.id)) - } -} - -impl Create for ApplicationAws { +impl Create for Application +where + Application: Service, +{ #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -310,8 +339,8 @@ impl Create for ApplicationAws { fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", 
function_name!(), self.name(), event_details, @@ -324,13 +353,16 @@ impl Create for ApplicationAws { } } -impl Pause for ApplicationAws { +impl Pause for Application +where + Application: Service, +{ #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -350,8 +382,8 @@ impl Pause for ApplicationAws { fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details, @@ -362,13 +394,16 @@ impl Pause for ApplicationAws { } } -impl Delete for ApplicationAws { +impl Delete for Application +where + Application: Service, +{ #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details.clone(), @@ -388,8 +423,8 @@ impl Delete for ApplicationAws { fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "application", function_name!(), self.name(), event_details.clone(), @@ -402,20 +437,29 @@ impl Delete for ApplicationAws { } } -impl Listen for ApplicationAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); +impl 
StatelessService for Application +where + Application: Service, +{ + fn as_stateless_service(&self) -> &dyn StatelessService { + self } } -#[derive(Clone, Eq, PartialEq, Hash)] -pub enum StorageType { - SC1, - ST1, - GP2, - IO1, +pub trait IApplication: StatelessService { + fn get_build(&self) -> &Build; + fn get_build_mut(&mut self) -> &mut Build; +} + +impl IApplication for Application +where + Application: Service, +{ + fn get_build(&self) -> &Build { + self.build() + } + + fn get_build_mut(&mut self) -> &mut Build { + self.build_mut() + } } diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs new file mode 100644 index 00000000..1d21f284 --- /dev/null +++ b/src/models/aws/application.rs @@ -0,0 +1,90 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::aws::AwsStorageType; +use crate::models::types::AWS; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let mut context = default_tera_context(self, target.kubernetes, target.environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build().image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + 
context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.build().image.registry_host()); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + AwsStorageType::SC1 => "sc1", + AwsStorageType::ST1 => "st1", + AwsStorageType::GP2 => "gp2", + AwsStorageType::IO1 => "io1", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/aws/mod.rs b/src/models/aws/mod.rs new file mode 100644 index 00000000..2bbfbc4f --- /dev/null +++ b/src/models/aws/mod.rs @@ -0,0 +1,43 @@ +pub mod application; + +use crate::models::types::CloudProvider; +use crate::models::types::AWS; + +pub struct AwsAppExtraSettings {} +pub struct AwsDbExtraSettings {} +pub struct AwsRouterExtraSettings {} + +impl CloudProvider for AWS { 
+ type AppExtraSettings = AwsAppExtraSettings; + type DbExtraSettings = AwsDbExtraSettings; + type RouterExtraSettings = AwsRouterExtraSettings; + type StorageTypes = AwsStorageType; + + fn short_name() -> &'static str { + "AWS" + } + + fn full_name() -> &'static str { + "Amazon Web Service" + } + + fn registry_short_name() -> &'static str { + "ECR" + } + + fn registry_full_name() -> &'static str { + "Elastic Container Registry" + } + + fn helm_directory_name() -> &'static str { + "aws" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum AwsStorageType { + SC1, + ST1, + GP2, + IO1, +} diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs new file mode 100644 index 00000000..5a2e7a61 --- /dev/null +++ b/src/models/digital_ocean/application.rs @@ -0,0 +1,91 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::digital_ocean::DoStorageType; +use crate::models::types::DO; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let cpu_limits = match 
validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + + // This is specific to digital ocean as it is them that create the registry secret + // we don't have the hand on it + context.insert("registry_secret", "do-container-registry-secret-for-cluster"); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + DoStorageType::Standard => "do-block-storage", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/digital_ocean/mod.rs b/src/models/digital_ocean/mod.rs new file mode 100644 index 00000000..30c11461 --- /dev/null +++ b/src/models/digital_ocean/mod.rs @@ -0,0 
+1,126 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::DO; +use std::fmt; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; + +pub struct DoAppExtraSettings {} +pub struct DoDbExtraSettings {} +pub struct DoRouterExtraSettings {} + +impl CloudProvider for DO { + type AppExtraSettings = DoAppExtraSettings; + type DbExtraSettings = DoDbExtraSettings; + type RouterExtraSettings = DoRouterExtraSettings; + type StorageTypes = DoStorageType; + + fn short_name() -> &'static str { + "DO" + } + + fn full_name() -> &'static str { + "Digital Ocean" + } + + fn registry_short_name() -> &'static str { + "DO CR" + } + + fn registry_full_name() -> &'static str { + "Digital Ocean Container Registry" + } + + fn helm_directory_name() -> &'static str { + "digitalocean" + } +} + +#[derive(Clone, Eq, PartialEq, Hash)] +pub enum DoStorageType { + Standard, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum DoRegion { + NewYorkCity1, + NewYorkCity2, + NewYorkCity3, + Amsterdam2, + Amsterdam3, + SanFrancisco1, + SanFrancisco2, + SanFrancisco3, + Singapore, + London, + Frankfurt, + Toronto, + Bangalore, +} + +impl DoRegion { + pub fn as_str(&self) -> &str { + match self { + DoRegion::NewYorkCity1 => "nyc1", + DoRegion::NewYorkCity2 => "nyc2", + DoRegion::NewYorkCity3 => "nyc3", + DoRegion::Amsterdam2 => "ams2", + DoRegion::Amsterdam3 => "ams3", + DoRegion::SanFrancisco1 => "sfo1", + DoRegion::SanFrancisco2 => "sfo2", + DoRegion::SanFrancisco3 => "sfo3", + DoRegion::Singapore => "sgp1", + DoRegion::London => "lon1", + DoRegion::Frankfurt => "fra1", + DoRegion::Toronto => "tor1", + DoRegion::Bangalore => "blr1", + } + } +} + +impl Display for DoRegion { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + DoRegion::NewYorkCity1 => write!(f, "nyc1"), + DoRegion::NewYorkCity2 => write!(f, "nyc2"), + DoRegion::NewYorkCity3 => write!(f, "nyc3"), + DoRegion::Amsterdam2 => 
write!(f, "ams2"), + DoRegion::Amsterdam3 => write!(f, "ams3"), + DoRegion::SanFrancisco1 => write!(f, "sfo1"), + DoRegion::SanFrancisco2 => write!(f, "sfo2"), + DoRegion::SanFrancisco3 => write!(f, "sfo3"), + DoRegion::Singapore => write!(f, "sgp1"), + DoRegion::London => write!(f, "lon1"), + DoRegion::Frankfurt => write!(f, "fra1"), + DoRegion::Toronto => write!(f, "tor1"), + DoRegion::Bangalore => write!(f, "blr1"), + } + } +} + +impl FromStr for DoRegion { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "nyc1" => Ok(DoRegion::NewYorkCity1), + "nyc2" => Ok(DoRegion::NewYorkCity2), + "nyc3" => Ok(DoRegion::NewYorkCity3), + "ams2" => Ok(DoRegion::Amsterdam2), + "ams3" => Ok(DoRegion::Amsterdam3), + "sfo1" => Ok(DoRegion::SanFrancisco1), + "sfo2" => Ok(DoRegion::SanFrancisco2), + "sfo3" => Ok(DoRegion::SanFrancisco3), + "sgp1" => Ok(DoRegion::Singapore), + "lon1" => Ok(DoRegion::London), + "fra1" => Ok(DoRegion::Frankfurt), + "tor1" => Ok(DoRegion::Toronto), + "blr1" => Ok(DoRegion::Bangalore), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` region is not supported", s))); + } + } + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 00000000..8db33ab5 --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,5 @@ +pub mod application; +pub mod aws; +pub mod digital_ocean; +pub mod scaleway; +pub mod types; diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs new file mode 100644 index 00000000..2b14300f --- /dev/null +++ b/src/models/scaleway/application.rs @@ -0,0 +1,103 @@ +use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; +use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; +use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, Stage}; +use 
crate::io_models::ListenersHelper; +use crate::models::application::{Application, ToTeraContext}; +use crate::models::scaleway::ScwStorageType; +use crate::models::types::SCW; +use tera::Context as TeraContext; + +impl ToTeraContext for Application { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + let commit_id = self.build.image.commit_id.as_str(); + + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); + + let cpu_limits = match validate_k8s_required_cpu_and_burstable( + &ListenersHelper::new(&self.listeners), + self.context.execution_id(), + &self.id, + self.total_cpus(), + self.cpu_burst(), + event_details.clone(), + self.logger(), + ) { + Ok(l) => l, + Err(e) => { + return Err(EngineError::new_k8s_validate_required_cpu_and_burstable_error( + event_details, + self.total_cpus(), + self.cpu_burst(), + e, + )); + } + }; + context.insert("cpu_burst", &cpu_limits.cpu_limit); + + let storage = self + .storage + .iter() + .map(|s| StorageDataTemplate { + id: s.id.clone(), + name: s.name.clone(), + storage_type: match s.storage_type { + // TODO(benjaminch): Switch to proper storage class + // Note: Seems volume storage type are not supported, only blocked storage for the time being + // 
https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass + ScwStorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd", + ScwStorageType::LocalSsd => "l_ssd", + } + .to_string(), + size_in_gib: s.size_in_gib, + mount_point: s.mount_point.clone(), + snapshot_retention_in_days: s.snapshot_retention_in_days, + }) + .collect::>(); + + let is_storage = !storage.is_empty(); + + context.insert("storage", &storage); + context.insert("is_storage", &is_storage); + context.insert("clone", &false); + context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + // container registry credentials + context.insert( + "container_registry_docker_json_config", + self.build + .image + .clone() + .registry_docker_json_config + .unwrap_or_default() + .as_str(), + ); + + Ok(context) + } +} diff --git a/src/models/scaleway/mod.rs b/src/models/scaleway/mod.rs new file mode 100644 index 00000000..ab73a293 --- /dev/null +++ b/src/models/scaleway/mod.rs @@ -0,0 +1,207 @@ +mod application; + +use crate::errors::CommandError; +use crate::models::types::CloudProvider; +use crate::models::types::SCW; +use std::fmt; +use std::str::FromStr; + +pub struct ScwAppExtraSettings {} +pub struct ScwDbExtraSettings {} +pub struct ScwRouterExtraSettings {} + +impl CloudProvider for SCW { + type AppExtraSettings = ScwAppExtraSettings; + type DbExtraSettings = ScwDbExtraSettings; + type RouterExtraSettings = ScwRouterExtraSettings; + type StorageTypes = ScwStorageType; + + fn short_name() -> &'static str { + "SCW" + } + + fn full_name() -> &'static str { + "Scaleway" + } + + fn registry_short_name() -> &'static str { + "SCW CR" + } + + fn registry_full_name() -> &'static str { + "Scaleway Container Registry" + } + + fn helm_directory_name() -> &'static str { + "scaleway" + } +} + 
+#[derive(Clone, Debug, Eq, PartialEq, Hash, serde_derive::Serialize, serde_derive::Deserialize)] +pub enum ScwStorageType { + #[serde(rename = "b_ssd")] + BlockSsd, + #[serde(rename = "l_ssd")] + LocalSsd, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwRegion { + Paris, + Amsterdam, + Warsaw, +} + +impl ScwRegion { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwRegion::Paris => "fr-par", + ScwRegion::Amsterdam => "nl-ams", + ScwRegion::Warsaw => "pl-waw", + } + } +} + +impl fmt::Display for ScwRegion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwRegion::Paris => write!(f, "fr-par"), + ScwRegion::Amsterdam => write!(f, "nl-ams"), + ScwRegion::Warsaw => write!(f, "pl-waw"), + } + } +} + +impl FromStr for ScwRegion { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "fr-par" => Ok(ScwRegion::Paris), + "nl-ams" => Ok(ScwRegion::Amsterdam), + "pl-waw" => Ok(ScwRegion::Warsaw), + _ => Err(()), + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum ScwZone { + Paris1, + Paris2, + Paris3, + Amsterdam1, + Warsaw1, +} + +impl ScwZone { + // TODO(benjaminch): improve / refactor this! + pub fn as_str(&self) -> &str { + match self { + ScwZone::Paris1 => "fr-par-1", + ScwZone::Paris2 => "fr-par-2", + ScwZone::Paris3 => "fr-par-3", + ScwZone::Amsterdam1 => "nl-ams-1", + ScwZone::Warsaw1 => "pl-waw-1", + } + } + + pub fn region(&self) -> ScwRegion { + match self { + ScwZone::Paris1 => ScwRegion::Paris, + ScwZone::Paris2 => ScwRegion::Paris, + ScwZone::Paris3 => ScwRegion::Paris, + ScwZone::Amsterdam1 => ScwRegion::Amsterdam, + ScwZone::Warsaw1 => ScwRegion::Warsaw, + } + } + + // TODO(benjaminch): improve / refactor this! 
+ pub fn region_str(&self) -> String { + match self { + ScwZone::Paris1 => "fr-par", + ScwZone::Paris2 => "fr-par", + ScwZone::Paris3 => "fr-par", + ScwZone::Amsterdam1 => "nl-ams", + ScwZone::Warsaw1 => "pl-waw", + } + .to_string() + } +} + +impl fmt::Display for ScwZone { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ScwZone::Paris1 => write!(f, "fr-par-1"), + ScwZone::Paris2 => write!(f, "fr-par-2"), + ScwZone::Paris3 => write!(f, "fr-par-3"), + ScwZone::Amsterdam1 => write!(f, "nl-ams-1"), + ScwZone::Warsaw1 => write!(f, "pl-waw-1"), + } + } +} + +impl FromStr for ScwZone { + type Err = CommandError; + + fn from_str(s: &str) -> Result { + match s { + "fr-par-1" => Ok(ScwZone::Paris1), + "fr-par-2" => Ok(ScwZone::Paris2), + "fr-par-3" => Ok(ScwZone::Paris3), + "nl-ams-1" => Ok(ScwZone::Amsterdam1), + "pl-waw-1" => Ok(ScwZone::Warsaw1), + _ => { + return Err(CommandError::new_from_safe_message(format!("`{}` zone is not supported", s))); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::{ScwRegion, ScwZone}; + use std::str::FromStr; + + #[test] + fn test_region_to_str() { + assert_eq!("fr-par", ScwRegion::Paris.as_str()); + assert_eq!("nl-ams", ScwRegion::Amsterdam.as_str()); + assert_eq!("pl-waw", ScwRegion::Warsaw.as_str()); + } + + #[test] + fn test_region_from_str() { + assert_eq!(ScwRegion::from_str("fr-par"), Ok(ScwRegion::Paris)); + assert_eq!(ScwRegion::from_str("nl-ams"), Ok(ScwRegion::Amsterdam)); + assert_eq!(ScwRegion::from_str("pl-waw"), Ok(ScwRegion::Warsaw)); + } + + #[test] + fn test_zone_to_str() { + assert_eq!("fr-par-1", ScwZone::Paris1.as_str()); + assert_eq!("fr-par-2", ScwZone::Paris2.as_str()); + assert_eq!("fr-par-3", ScwZone::Paris3.as_str()); + assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str()); + assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str()); + } + + #[test] + fn test_zone_from_str() { + assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1)); + assert_eq!(ScwZone::from_str("fr-par-2"), 
Ok(ScwZone::Paris2)); + assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3)); + assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1)); + assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1)); + } + + #[test] + fn test_zone_region() { + assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris); + assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam); + assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw); + } +} diff --git a/src/models/types.rs b/src/models/types.rs new file mode 100644 index 00000000..75fef0ea --- /dev/null +++ b/src/models/types.rs @@ -0,0 +1,19 @@ +// Those types are just marker types that are use to tag our struct/object model +pub struct AWS {} +pub struct DO {} +pub struct SCW {} + +// CloudProvider trait allows to derive all the custom type we need per provider, +// with our marker type defined above to be able to select the correct one +pub trait CloudProvider { + type AppExtraSettings; + type DbExtraSettings; + type RouterExtraSettings; + type StorageTypes; + + fn short_name() -> &'static str; + fn full_name() -> &'static str; + fn registry_short_name() -> &'static str; + fn registry_full_name() -> &'static str; + fn helm_directory_name() -> &'static str; +} diff --git a/src/object_storage/mod.rs b/src/object_storage/mod.rs index baa4de6a..27684d8a 100644 --- a/src/object_storage/mod.rs +++ b/src/object_storage/mod.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; use crate::object_storage::errors::ObjectStorageError; use std::fs::File; diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 886d962f..0de654aa 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -14,7 +14,7 @@ use rusoto_s3::{ }; use tokio::io; -use crate::models::{Context, StringPath}; +use 
crate::io_models::{Context, StringPath}; use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime::block_on; diff --git a/src/object_storage/scaleway_object_storage.rs b/src/object_storage/scaleway_object_storage.rs index 664af26f..1b0e376d 100644 --- a/src/object_storage/scaleway_object_storage.rs +++ b/src/object_storage/scaleway_object_storage.rs @@ -2,10 +2,10 @@ use chrono::{DateTime, Utc}; use std::fs::File; use std::path::Path; -use crate::cloud_provider::scaleway::application::ScwZone; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; use crate::object_storage::{Kind, ObjectStorage}; +use crate::models::scaleway::ScwZone; use crate::object_storage::errors::ObjectStorageError; use crate::runtime::block_on; use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; diff --git a/src/object_storage/spaces.rs b/src/object_storage/spaces.rs index 1785b29e..b337f060 100644 --- a/src/object_storage/spaces.rs +++ b/src/object_storage/spaces.rs @@ -11,8 +11,8 @@ use rusoto_s3::{ }; use tokio::io; -use crate::cloud_provider::digitalocean::application::DoRegion; -use crate::models::{Context, StringPath}; +use crate::io_models::{Context, StringPath}; +use crate::models::digital_ocean::DoRegion; use crate::object_storage::errors::ObjectStorageError; use crate::object_storage::{Kind, ObjectStorage}; use crate::runtime; diff --git a/src/transaction.rs b/src/transaction.rs index 10eb6ee8..785c3b6e 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -4,14 +4,17 @@ use std::cell::RefCell; use std::rc::Rc; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::service::{Action, Application, Service}; +use crate::cloud_provider::service::{Action, Service}; use crate::container_registry::errors::ContainerRegistryError; use crate::container_registry::to_engine_error; use crate::engine::{EngineConfig, EngineConfigError}; use 
crate::errors::{EngineError, Tag}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, Transmitter}; +use crate::io_models::{ + EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, +}; use crate::logger::Logger; -use crate::models::{EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier}; +use crate::models::application::IApplication; pub struct Transaction<'a> { engine: &'a EngineConfig, @@ -128,7 +131,7 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - applications: &mut [Box], + applications: &mut [Box], option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 44d41a7d..36fb944c 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -12,8 +12,8 @@ use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::engine::EngineConfig; +use qovery_engine::io_models::{Context, NoOpProgressListener}; use qovery_engine::logger::Logger; -use qovery_engine::models::{Context, NoOpProgressListener}; use std::str::FromStr; use std::sync::Arc; use tracing::error; diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs index 037b2456..6cee1e77 100644 --- a/test_utilities/src/cloudflare.rs +++ b/test_utilities/src/cloudflare.rs @@ -2,7 +2,7 @@ use crate::common::ClusterDomain; use crate::utilities::FuncTestsSecrets; use qovery_engine::dns_provider::cloudflare::Cloudflare; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{Context, Domain}; +use qovery_engine::io_models::{Context, Domain}; pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box { let secrets = FuncTestsSecrets::new(); diff --git 
a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 047f8e03..b07d30c1 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -6,7 +6,7 @@ use std::cell::RefCell; use qovery_engine::cloud_provider::utilities::sanitize_name; use qovery_engine::dns_provider::DnsProvider; -use qovery_engine::models::{ +use qovery_engine::io_models::{ Action, Application, CloneForTest, Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, GitCredentials, Port, Protocol, Route, Router, Storage, StorageType, }; @@ -22,13 +22,11 @@ use base64; use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; use qovery_engine::cloud_provider::environment::Environment; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, Kind}; @@ -36,8 +34,10 @@ use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::CONTAINER; use qovery_engine::logger::Logger; -use qovery_engine::models::DatabaseMode::CONTAINER; +use qovery_engine::models::digital_ocean::DoRegion; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::transaction::{DeploymentOption, Transaction, TransactionResult}; use std::collections::BTreeMap; use std::path::Path; @@ -1116,7 +1116,7 @@ 
pub fn test_db( app.environment_vars = db_infos.app_env_vars.clone(); app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 4192c634..36a5db93 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -7,18 +7,18 @@ use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest, NoOpProgressListener}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; +use qovery_engine::models::digital_ocean::DoRegion; pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const DO_KUBERNETES_MINOR_VERSION: u8 = 20; diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index fa915f9c..d3c570bf 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -1,12 +1,11 @@ use const_format::formatcp; use qovery_engine::build_platform::Build; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::{CloudProvider, 
TerraformStateCredentials}; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::engine::EngineConfig; -use qovery_engine::models::{Context, EnvironmentRequest, NoOpProgressListener}; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; @@ -22,6 +21,7 @@ use qovery_engine::container_registry::errors::ContainerRegistryError; use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::logger::Logger; +use qovery_engine::models::scaleway::ScwZone; use tracing::error; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index eb3798b5..3b553377 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -11,7 +11,6 @@ use gethostname; use std::collections::BTreeMap; use std::io::{Error, ErrorKind, Write}; use std::path::Path; -use std::str::FromStr; use passwords::PasswordGenerator; use qovery_engine::cloud_provider::digitalocean::kubernetes::doks_api::get_do_kubeconfig_by_cluster_name; @@ -21,6 +20,7 @@ use retry::delay::Fibonacci; use retry::OperationResult; use std::env; use std::fs; +use std::str::FromStr; use tracing::{info, warn}; use crate::scaleway::{ @@ -29,14 +29,13 @@ use crate::scaleway::{ }; use hashicorp_vault; use qovery_engine::build_platform::local_docker::LocalDocker; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd; use qovery_engine::constants::{ AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID, DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY, }; -use qovery_engine::models::{Context, Database, DatabaseKind, 
DatabaseMode, EnvironmentRequest, Features, Metadata}; +use qovery_engine::io_models::{Context, Database, DatabaseKind, DatabaseMode, EnvironmentRequest, Features, Metadata}; use retry::Error::Operation; use serde::{Deserialize, Serialize}; @@ -50,8 +49,9 @@ use qovery_engine::cmd::docker::Docker; use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc}; use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC}; use qovery_engine::errors::CommandError; +use qovery_engine::io_models::DatabaseMode::MANAGED; use qovery_engine::logger::{Logger, StdIoLogger}; -use qovery_engine::models::DatabaseMode::MANAGED; +use qovery_engine::models::scaleway::ScwZone; use qovery_engine::runtime::block_on; use time::Instant; use url::Url; diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 5e7a8d51..e201076c 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -2,7 +2,7 @@ extern crate test_utilities; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; @@ -10,7 +10,7 @@ use self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TY use self::test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; use test_utilities::common::{test_db, Infrastructure}; @@ -304,7 +304,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); 
environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index c90ae51b..d45cc26f 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -389,7 +389,7 @@ fn build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -460,7 +460,7 @@ fn build_worker_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -583,7 +583,7 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -665,7 +665,7 @@ fn redeploy_same_app_with_ebs() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); let environment_check2 = environment.clone(); @@ -760,7 +760,7 @@ fn deploy_a_not_working_environment_and_after_working_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -838,7 
+838,7 @@ fn deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index fcd475b7..48826a55 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -2,13 +2,13 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::{database_test_environment, test_db, Infrastructure}; use test_utilities::digitalocean::{ clean_environments, do_default_engine_config, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, @@ -332,7 +332,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index a15cf579..756422a6 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, 
StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use std::collections::BTreeMap; use std::thread; @@ -331,7 +331,7 @@ fn digitalocean_doks_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -459,7 +459,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -542,7 +542,7 @@ fn digitalocean_doks_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -649,7 +649,7 @@ fn digitalocean_doks_deploy_a_not_working_environment_and_then_working_environme app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -728,7 +728,7 @@ fn digitalocean_doks_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::new(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index b91fbcb7..653e2ea2 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -4,8 +4,8 @@ use self::test_utilities::common::ClusterDomain; use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; -use 
qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterTestType}; #[cfg(feature = "test-do-infra")] diff --git a/tests/digitalocean/do_spaces.rs b/tests/digitalocean/do_spaces.rs index e214ede1..3e6b9e8d 100644 --- a/tests/digitalocean/do_spaces.rs +++ b/tests/digitalocean/do_spaces.rs @@ -1,4 +1,4 @@ -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; +use qovery_engine::models::digital_ocean::DoRegion; use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use qovery_engine::object_storage::ObjectStorage; use tempfile::NamedTempFile; diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index 3a53d40c..2851c16f 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -1,6 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::digitalocean::application::DoRegion; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::digital_ocean::DoRegion; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; diff --git a/tests/scaleway/scw_container_registry.rs b/tests/scaleway/scw_container_registry.rs index 7aae7645..8ea49c8a 100644 --- a/tests/scaleway/scw_container_registry.rs +++ b/tests/scaleway/scw_container_registry.rs @@ -1,9 +1,9 @@ extern crate test_utilities; use self::test_utilities::utilities::{context, FuncTestsSecrets}; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; -use qovery_engine::models::NoOpProgressListener; +use 
qovery_engine::io_models::NoOpProgressListener; +use qovery_engine::models::scaleway::ScwZone; use std::sync::Arc; use tracing::debug; use uuid::Uuid; diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 489d7e4b..128e8917 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -2,14 +2,14 @@ use ::function_name::named; use tracing::{span, warn, Level}; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; -use qovery_engine::models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; +use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use qovery_engine::transaction::TransactionResult; use test_utilities::utilities::{ context, engine_run_test, generate_id, generate_password, get_pods, get_svc_name, init, is_pod_restarted_env, logger, FuncTestsSecrets, }; -use qovery_engine::models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use test_utilities::common::test_db; use test_utilities::common::{database_test_environment, Infrastructure}; use test_utilities::scaleway::{ @@ -337,7 +337,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { }; app }) - .collect::>(); + .collect::>(); environment.routers[0].routes[0].application_name = app_name; let environment_to_redeploy = environment.clone(); diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index ca0930a3..2fb6cdf1 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -7,7 +7,7 @@ use self::test_utilities::utilities::{ }; use ::function_name::named; use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; +use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; use 
std::collections::BTreeMap; use std::thread; @@ -341,7 +341,7 @@ fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() { app.dockerfile_path = None; app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -474,7 +474,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { }]; app }) - .collect::>(); + .collect::>(); let mut environment_delete = environment.clone(); environment_delete.action = Action::Delete; @@ -642,7 +642,7 @@ fn scaleway_kapsule_redeploy_same_app() { }]; app }) - .collect::>(); + .collect::>(); let environment_redeploy = environment.clone(); let environment_check1 = environment.clone(); @@ -751,7 +751,7 @@ fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environmen app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); let mut environment_for_delete = environment.clone(); environment_for_delete.action = Action::Delete; @@ -835,7 +835,7 @@ fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() { app.environment_vars = BTreeMap::default(); app }) - .collect::>(); + .collect::>(); // not working 2 let context_for_not_working_2 = context.clone_not_same_execution_id(); diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 0e6075be..952cc24d 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -4,8 +4,8 @@ use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETE use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, 
ClusterTestType}; #[cfg(feature = "test-scw-infra")] diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index 2f6be0f9..bef7262d 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,6 +1,6 @@ use ::function_name::named; -use qovery_engine::cloud_provider::scaleway::application::ScwZone; use qovery_engine::cloud_provider::Kind; +use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; From d98e6893a85006c374d47f4951fb7e50844b7172 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 17:45:23 +0200 Subject: [PATCH 002/122] Cleanup --- src/models/application.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/models/application.rs b/src/models/application.rs index b7a01601..8eba144b 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -326,9 +326,7 @@ where event_details, self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_user_stateless_service(target, self) - }) + send_progress_on_long_task(self, Action::Create, || deploy_user_stateless_service(target, self)) } fn on_create_check(&self) -> Result<(), EngineError> { @@ -347,9 +345,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) + send_progress_on_long_task(self, Action::Create, || deploy_stateless_service_error(target, self)) } } @@ -369,7 +365,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { + send_progress_on_long_task(self, Action::Pause, || 
{ scale_down_application(target, self, 0, if self.is_stateful() { Statefulset } else { Deployment }) }) } @@ -410,7 +406,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { + send_progress_on_long_task(self, Action::Delete, || { delete_stateless_service(target, self, event_details.clone()) }) } @@ -431,7 +427,7 @@ where self.logger(), ); - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { + send_progress_on_long_task(self, Action::Delete, || { delete_stateless_service(target, self, event_details.clone()) }) } From 46cabb498cc01710b5673303b00c3c79cf660d26 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Mon, 28 Mar 2022 18:06:42 +0200 Subject: [PATCH 003/122] Cleanup --- src/io_models.rs | 151 +++++++++++++++++------------------ test_utilities/src/common.rs | 84 ++++++++++--------- 2 files changed, 120 insertions(+), 115 deletions(-) diff --git a/src/io_models.rs b/src/io_models.rs index ab929c25..e9410a96 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -38,7 +38,7 @@ use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; -use crate::models::application::IApplication; +use crate::models::application::{ApplicationError, IApplication}; use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; @@ -112,16 +112,18 @@ impl EnvironmentRequest { cloud_provider: &dyn CloudProvider, container_registry: &ContainerRegistryInfo, logger: Box, - ) -> Environment { - //FIXME: remove those flatten as it hide errors regarding conversion to model data type - let applications = self - .applications - .iter() - .filter_map(|x| { - x.to_application_domain(context, x.to_build(container_registry), cloud_provider, logger.clone()) - }) - .collect::>(); + ) -> Result 
{ + let mut applications = Vec::with_capacity(self.applications.len()); + for app in &self.applications { + match app.to_application_domain(context, app.to_build(container_registry), cloud_provider, logger.clone()) { + Ok(app) => applications.push(app), + Err(err) => { + return Err(err); + } + } + } + //FIXME: remove those flatten as it hide errors regarding conversion to model data type let routers = self .routers .iter() @@ -134,7 +136,7 @@ impl EnvironmentRequest { .filter_map(|x| x.to_database_domain(context, cloud_provider, logger.clone())) .collect::>(); - Environment::new( + Ok(Environment::new( self.id.as_str(), self.project_id.as_str(), self.owner_id.as_str(), @@ -143,7 +145,7 @@ impl EnvironmentRequest { applications, routers, databases, - ) + )) } } @@ -222,77 +224,68 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - AwsAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), - CPKind::Do => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| 
s.to_do_storage()).collect::>(), - environment_variables, - DoAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), - CPKind::Scw => Some(Box::new( - models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - self.start_timeout_in_seconds, - build, - self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), - environment_variables, - ScwAppExtraSettings {}, - listeners, - logger.clone(), - ) - .unwrap(), - )), + CPKind::Aws => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + AwsAppExtraSettings {}, + listeners, + logger.clone(), + )?)), + CPKind::Do => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_do_storage()).collect::>(), + environment_variables, + DoAppExtraSettings {}, + listeners, + logger.clone(), + )?)), + CPKind::Scw => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + 
self.start_timeout_in_seconds, + build, + self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), + environment_variables, + ScwAppExtraSettings {}, + listeners, + logger.clone(), + )?)), } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index b07d30c1..af35a0f8 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -108,12 +108,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> (Environment, TransactionResult) { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.build_environment( @@ -135,12 +137,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.deploy_environment_with_options( @@ -161,12 +165,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = 
environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.pause_environment(&env); @@ -180,12 +186,14 @@ impl Infrastructure for EnvironmentRequest { engine_config: &EngineConfig, ) -> TransactionResult { let mut tx = Transaction::new(engine_config, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); - let env = environment.to_environment_domain( - engine_config.context(), - engine_config.cloud_provider(), - engine_config.container_registry().registry_info(), - logger, - ); + let env = environment + .to_environment_domain( + engine_config.context(), + engine_config.cloud_provider(), + engine_config.container_registry().registry_info(), + logger, + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); let _ = tx.delete_environment(&env); @@ -1474,12 +1482,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - let env = env.to_environment_domain( - &context, - engine.cloud_provider(), - engine.container_registry().registry_info(), - logger.clone(), - ); + let env = env + .to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); if let Err(err) = deploy_env_tx.deploy_environment(&env) { panic!("{:?}", err) @@ -1593,12 +1603,14 @@ pub fn cluster_test( Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); // Deploy env - let env = env.to_environment_domain( - &context, - engine.cloud_provider(), - engine.container_registry().registry_info(), - logger.clone(), - ); + let env = env + 
.to_environment_domain( + &context, + engine.cloud_provider(), + engine.container_registry().registry_info(), + logger.clone(), + ) + .unwrap(); let env = Rc::new(RefCell::new(env)); if let Err(err) = destroy_env_tx.delete_environment(&env) { panic!("{:?}", err) From d222f89a4939f7418f2eafc48ff28d1b9bc3e1c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 29 Mar 2022 15:48:27 +0200 Subject: [PATCH 004/122] Router refacto preparation advanced settings (#670) * Router refacto --- src/cloud_provider/aws/mod.rs | 1 - src/cloud_provider/digitalocean/mod.rs | 1 - src/cloud_provider/digitalocean/router.rs | 494 ------------------- src/cloud_provider/environment.rs | 57 +-- src/cloud_provider/scaleway/mod.rs | 1 - src/cloud_provider/scaleway/router.rs | 444 ----------------- src/cloud_provider/service.rs | 11 +- src/io_models.rs | 51 +- src/models/application.rs | 6 +- src/models/aws/application.rs | 4 +- src/models/aws/mod.rs | 1 + src/models/aws/router.rs | 11 + src/models/digital_ocean/application.rs | 4 +- src/models/digital_ocean/mod.rs | 1 + src/models/digital_ocean/router.rs | 19 + src/models/mod.rs | 1 + src/{cloud_provider/aws => models}/router.rs | 403 +++++++++------ src/models/scaleway/application.rs | 4 +- src/models/scaleway/mod.rs | 1 + src/models/scaleway/router.rs | 11 + src/models/types.rs | 8 + src/transaction.rs | 6 +- 22 files changed, 337 insertions(+), 1203 deletions(-) delete mode 100644 src/cloud_provider/digitalocean/router.rs delete mode 100644 src/cloud_provider/scaleway/router.rs create mode 100644 src/models/aws/router.rs create mode 100644 src/models/digital_ocean/router.rs rename src/{cloud_provider/aws => models}/router.rs (74%) create mode 100644 src/models/scaleway/router.rs diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index 7dba78d6..fd93b941 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -15,7 +15,6 @@ use crate::runtime::block_on; pub 
mod databases; pub mod kubernetes; pub mod regions; -pub mod router; pub struct AWS { context: Context, diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index debccdf3..5aa63ab8 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -16,7 +16,6 @@ pub mod do_api_common; pub mod kubernetes; pub mod models; pub mod network; -pub mod router; pub struct DO { context: Context, diff --git a/src/cloud_provider/digitalocean/router.rs b/src/cloud_provider/digitalocean/router.rs deleted file mode 100644 index 1bc93804..00000000 --- a/src/cloud_provider/digitalocean/router.rs +++ /dev/null @@ -1,494 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::helm::ChartInfo; -use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; -use crate::cloud_provider::service::{ - default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm; -use crate::cmd::helm::Timeout; -use crate::errors::EngineError; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct RouterDo { - context: Context, - id: String, - action: Action, - name: String, - default_domain: String, - custom_domains: Vec, - sticky_sessions_enabled: bool, - routes: Vec, - listeners: Listeners, - logger: Box, -} - -impl RouterDo { - pub fn new( - context: Context, - id: &str, - name: &str, - action: Action, - default_domain: &str, - custom_domains: Vec, - routes: Vec, - sticky_sessions_enabled: bool, - 
listeners: Listeners, - logger: Box, - ) -> Self { - RouterDo { - context, - id: id.to_string(), - name: name.to_string(), - action, - default_domain: default_domain.to_string(), - custom_domains, - sticky_sessions_enabled, - routes, - listeners, - logger, - } - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "router" - } -} - -impl Service for RouterDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Router - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("router", self.name()) - } - - fn version(&self) -> String { - "1.0".to_string() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - None - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - "1".to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - 1 - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - false - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - context.insert("doks_cluster_id", kubernetes.id()); - - let applications = environment - .stateless_services() - .into_iter() - .filter(|x| x.service_type() == ServiceType::Application) - .collect::>(); - - // it's a loop, but we can manage only one custom domain at a time. 
DO do not support more because of LB limitations - // we'll have to change it in the future, not urgent - let custom_domain_data_templates = self - .custom_domains - .iter() - .map(|cd| { - let domain_hash = crate::crypto::to_sha1_truncate_16(cd.domain.as_str()); - - // https://github.com/digitalocean/digitalocean-cloud-controller-manager/issues/291 - // Can only manage 1 host at a time on an DO load balancer - context.insert("custom_domain_name", cd.domain.as_str()); - - CustomDomainDataTemplate { - domain: cd.domain.clone(), - domain_hash, - target_domain: cd.target_domain.clone(), - } - }) - .collect::>(); - - let route_data_templates = self - .routes - .iter() - .filter_map(|r| { - match applications - .iter() - .find(|app| app.name() == r.application_name.as_str()) - { - Some(application) => application.private_port().map(|private_port| RouteDataTemplate { - path: r.path.clone(), - application_name: application.sanitized_name(), - application_port: private_port, - }), - _ => None, - } - }) - .collect::>(); - - // autoscaler - context.insert("nginx_enable_horizontal_autoscaler", "false"); - context.insert("nginx_minimum_replicas", "1"); - context.insert("nginx_maximum_replicas", "10"); - // resources - context.insert("nginx_requests_cpu", "200m"); - context.insert("nginx_requests_memory", "128Mi"); - context.insert("nginx_limit_cpu", "200m"); - context.insert("nginx_limit_memory", "128Mi"); - - let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - // Default domain - let external_ingress_hostname_default = crate::cmd::kubectl::kubectl_exec_get_external_ingress_hostname( - kubernetes_config_file_path, - "nginx-ingress", - "nginx-ingress-ingress-nginx-controller", - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - match external_ingress_hostname_default { - Ok(external_ingress_hostname_default) => match external_ingress_hostname_default { - Some(hostname) => context.insert("external_ingress_hostname_default", 
hostname.as_str()), - None => { - // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log(EngineEvent::Warning( - event_details, - EventMessage::new_from_safe( - "Error while trying to get Load Balancer hostname from Kubernetes cluster".to_string(), - ), - )); - } - }, - _ => { - // FIXME really? - // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log(EngineEvent::Warning( - event_details, - EventMessage::new_from_safe("Can't fetch external ingress hostname.".to_string()), - )); - } - } - - let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str()); - - let tls_domain = format!("*.{}", kubernetes.dns_provider().domain()); - context.insert("router_tls_domain", tls_domain.as_str()); - context.insert("router_default_domain", self.default_domain.as_str()); - context.insert("router_default_domain_hash", router_default_domain_hash.as_str()); - context.insert("custom_domains", &custom_domain_data_templates); - context.insert("routes", &route_data_templates); - context.insert("spec_acme_email", "tls@qovery.com"); // TODO CHANGE ME - context.insert("metadata_annotations_cert_manager_cluster_issuer", "letsencrypt-qovery"); - - let lets_encrypt_url = match self.context.is_test_cluster() { - true => "https://acme-staging-v02.api.letsencrypt.org/directory", - false => "https://acme-v02.api.letsencrypt.org/directory", - }; - context.insert("spec_acme_server", lets_encrypt_url); - - // Nginx - context.insert("sticky_sessions_enabled", &self.sticky_sessions_enabled); - - Ok(context) - } - - fn selector(&self) -> Option { - Some(format!("routerId={}", self.id)) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } -} - -impl Router for RouterDo { - fn domains(&self) -> Vec<&str> { - let mut _domains = vec![self.default_domain.as_str()]; - - for domain in &self.custom_domains { - _domains.push(domain.domain.as_str()); - } - - _domains - } - - fn 
has_custom_domains(&self) -> bool { - !self.custom_domains.is_empty() - } -} - -impl Helm for RouterDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("router-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/charts/ingress-nginx", self.context().lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/digitalocean/chart_values/nginx-ingress", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl Listen for RouterDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -impl StatelessService for RouterDo { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl ToTransmitter for RouterDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Router(self.id().to_string(), self.name().to_string()) - } -} - -impl Create for RouterDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let workspace_dir = self.workspace_directory(); - let helm_release_name = self.helm_release_name(); - - let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - // respect order - getting the context here and not before is mandatory - // the nginx-ingress must be available to get the external dns target if necessary - let context = self.tera_context(target)?; - - let from_dir = format!("{}/digitalocean/charts/q-ingress-tls", self.context.lib_root_dir()); - if let 
Err(e) = - crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - from_dir, - workspace_dir, - e, - )); - } - - // do exec helm upgrade and return the last deployment status - let helm = helm::Helm::new( - &kubernetes_config_file_path, - &kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| helm::to_engine_error(&event_details, e))?; - let chart = ChartInfo::new_from_custom_namespace( - helm_release_name, - workspace_dir.clone(), - environment.namespace().to_string(), - 600_i64, - match self.service_type() { - ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)], - _ => vec![], - }, - false, - self.selector(), - ); - - helm.upgrade(&chart, &[]) - .map_err(|e| helm::to_engine_error(&event_details, e)) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - - // check non custom domains - self.check_domains(event_details.clone(), self.logger())?; - - // Wait/Check that custom domain is a CNAME targeting qovery - for domain_to_check in self.custom_domains.iter() { - match check_cname_for( - self.progress_scope(), - self.listeners(), - &domain_to_check.domain, - self.context.execution_id(), - ) { - Ok(cname) if cname.trim_end_matches('.') == domain_to_check.target_domain.trim_end_matches('.') => { - continue; - } - Ok(err) | Err(err) => { - // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. 
Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), - ), - )); - } - } - } - - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for RouterDo { - #[named] - fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Delete for RouterDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - delete_router(target, self, event_details) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - delete_router(target, self, event_details) - } -} diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 916e8759..2fb5411b 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,6 +1,5 @@ -use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; +use crate::cloud_provider::service::{Action, Database, IRouter, StatefulService, StatelessService}; use crate::models::application::IApplication; -use crate::unit_conversion::cpu_string_to_float; pub struct Environment { namespace: String, @@ -10,7 +9,7 @@ pub struct Environment { pub organization_id: String, pub action: Action, pub applications: Vec>, - pub routers: Vec>, + pub routers: Vec>, pub databases: Vec>, } @@ -22,7 +21,7 @@ impl Environment { organization_id: &str, action: Action, applications: Vec>, - routers: Vec>, + routers: Vec>, databases: Vec>, ) -> Self { Environment { @@ -69,54 +68,4 @@ impl Environment { pub fn namespace(&self) -> &str { self.namespace.as_str() } - - /// compute the required resources for this environment from - /// applications, external services, routers, and databases - /// Note: Even if external services don't run on the targeted Kubernetes cluster, it requires CPU and memory resources to run the container(s) - pub fn required_resources(&self) -> EnvironmentResources { - let mut total_cpu_for_stateless_services: f32 = 0.0; - let mut total_ram_in_mib_for_stateless_services: u32 = 0; - let mut required_pods = self.stateless_services().len() as u32; - - for service in self.stateless_services() { - match service.action() { - Action::Create | Action::Nothing => { - total_cpu_for_stateless_services += cpu_string_to_float(&service.total_cpus()); - 
total_ram_in_mib_for_stateless_services += &service.total_ram_in_mib(); - required_pods += service.max_instances() - } - Action::Delete | Action::Pause => {} - } - } - - let mut total_cpu_for_stateful_services: f32 = 0.0; - let mut total_ram_in_mib_for_stateful_services: u32 = 0; - for service in self.stateful_services() { - if service.is_managed_service() { - // If it is a managed service, we don't care of its resources as it is not managed by us - continue; - } - - match service.action() { - Action::Pause | Action::Delete => { - total_cpu_for_stateful_services += cpu_string_to_float(service.total_cpus()); - total_ram_in_mib_for_stateful_services += service.total_ram_in_mib(); - required_pods += service.max_instances() - } - Action::Create | Action::Nothing => {} - } - } - - EnvironmentResources { - pods: required_pods, - cpu: total_cpu_for_stateless_services + total_cpu_for_stateful_services, - ram_in_mib: total_ram_in_mib_for_stateless_services + total_ram_in_mib_for_stateful_services, - } - } -} - -pub struct EnvironmentResources { - pub pods: u32, - pub cpu: f32, - pub ram_in_mib: u32, } diff --git a/src/cloud_provider/scaleway/mod.rs b/src/cloud_provider/scaleway/mod.rs index ceaf8c3c..7311f342 100644 --- a/src/cloud_provider/scaleway/mod.rs +++ b/src/cloud_provider/scaleway/mod.rs @@ -8,7 +8,6 @@ use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; pub mod databases; pub mod kubernetes; -pub mod router; pub struct Scaleway { context: Context, diff --git a/src/cloud_provider/scaleway/router.rs b/src/cloud_provider/scaleway/router.rs deleted file mode 100644 index f7c3cb95..00000000 --- a/src/cloud_provider/scaleway/router.rs +++ /dev/null @@ -1,444 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::helm::ChartInfo; -use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; -use crate::cloud_provider::service::{ - default_tera_context, delete_router, 
deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, -}; -use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm; -use crate::cmd::helm::Timeout; -use crate::errors::EngineError; -use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct RouterScw { - context: Context, - id: String, - action: Action, - name: String, - default_domain: String, - custom_domains: Vec, - sticky_sessions_enabled: bool, - routes: Vec, - listeners: Listeners, - logger: Box, -} - -impl RouterScw { - pub fn new( - context: Context, - id: &str, - name: &str, - action: Action, - default_domain: &str, - custom_domains: Vec, - routes: Vec, - sticky_sessions_enabled: bool, - listeners: Listeners, - logger: Box, - ) -> Self { - RouterScw { - context, - id: id.to_string(), - name: name.to_string(), - action, - default_domain: default_domain.to_string(), - custom_domains, - sticky_sessions_enabled, - routes, - listeners, - logger, - } - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "router" - } -} - -impl Service for RouterScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Router - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("router", self.name()) - } - - fn version(&self) -> String { - "1.0".to_string() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - None - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn 
total_cpus(&self) -> String { - "1".to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - 1 - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - false - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let kubernetes = target.kubernetes; - let environment = target.environment; - - let mut context = default_tera_context(self, kubernetes, environment); - - let applications = environment - .stateless_services() - .into_iter() - .filter(|x| x.service_type() == ServiceType::Application) - .collect::>(); - - let custom_domain_data_templates = self - .custom_domains - .iter() - .map(|cd| { - let domain_hash = crate::crypto::to_sha1_truncate_16(cd.domain.as_str()); - CustomDomainDataTemplate { - domain: cd.domain.clone(), - domain_hash, - target_domain: cd.target_domain.clone(), - } - }) - .collect::>(); - - let route_data_templates = self - .routes - .iter() - .filter_map(|r| { - match applications - .iter() - .find(|app| app.name() == r.application_name.as_str()) - { - Some(application) => application.private_port().map(|private_port| RouteDataTemplate { - path: r.path.clone(), - application_name: application.sanitized_name(), - application_port: private_port, - }), - _ => None, - } - }) - .collect::>(); - - let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str()); - let tls_domain = kubernetes.dns_provider().domain().wildcarded(); - - context.insert("router_tls_domain", tls_domain.to_string().as_str()); - context.insert("router_default_domain", self.default_domain.as_str()); - context.insert("router_default_domain_hash", router_default_domain_hash.as_str()); - context.insert("custom_domains", &custom_domain_data_templates); - context.insert("routes", &route_data_templates); - context.insert("spec_acme_email", "tls@qovery.com"); // TODO CHANGE ME - 
context.insert("metadata_annotations_cert_manager_cluster_issuer", "letsencrypt-qovery"); - - let lets_encrypt_url = match self.context.is_test_cluster() { - true => "https://acme-staging-v02.api.letsencrypt.org/directory", - false => "https://acme-v02.api.letsencrypt.org/directory", - }; - context.insert("spec_acme_server", lets_encrypt_url); - - // Nginx - context.insert("sticky_sessions_enabled", &self.sticky_sessions_enabled); - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("routerId={}", self.id)) - } -} - -impl Router for RouterScw { - fn domains(&self) -> Vec<&str> { - let mut _domains = vec![self.default_domain.as_str()]; - - for domain in &self.custom_domains { - _domains.push(domain.domain.as_str()); - } - - _domains - } - - fn has_custom_domains(&self) -> bool { - !self.custom_domains.is_empty() - } -} - -impl Helm for RouterScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("router-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/charts/ingress-nginx", self.context().lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/scaleway/chart_values/nginx-ingress", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl Listen for RouterScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -impl StatelessService for RouterScw { - fn as_stateless_service(&self) -> &dyn StatelessService { - self - } -} - -impl ToTransmitter for RouterScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Router(self.id().to_string(), self.name().to_string()) - } -} - -impl Create for RouterScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> 
Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let workspace_dir = self.workspace_directory(); - let helm_release_name = self.helm_release_name(); - - let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - // respect order - getting the context here and not before is mandatory - // the nginx-ingress must be available to get the external dns target if necessary - let context = self.tera_context(target)?; - - let from_dir = format!("{}/scaleway/charts/q-ingress-tls", self.context.lib_root_dir()); - if let Err(e) = - crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - from_dir, - workspace_dir, - e, - )); - } - - // do exec helm upgrade and return the last deployment status - let helm = helm::Helm::new( - &kubernetes_config_file_path, - &kubernetes.cloud_provider().credentials_environment_variables(), - ) - .map_err(|e| helm::to_engine_error(&event_details, e))?; - - let chart = ChartInfo::new_from_custom_namespace( - helm_release_name, - workspace_dir.clone(), - environment.namespace().to_string(), - 600_i64, - match self.service_type() { - ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)], - _ => vec![], - }, - false, - self.selector(), - ); - - helm.upgrade(&chart, &[]) - .map_err(|e| helm::to_engine_error(&event_details, e)) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - - // check non custom domains - self.check_domains(event_details.clone(), 
self.logger())?; - - // Wait/Check that custom domain is a CNAME targeting qovery - for domain_to_check in self.custom_domains.iter() { - match check_cname_for( - self.progress_scope(), - self.listeners(), - &domain_to_check.domain, - self.context.execution_id(), - ) { - Ok(cname) if cname.trim_end_matches('.') == domain_to_check.target_domain.trim_end_matches('.') => { - continue - } - Ok(err) | Err(err) => { - // TODO(benjaminch): Handle better this one via a proper error eventually - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new( - format!( - "Invalid CNAME for {}. Might not be an issue if user is using a CDN.", - domain_to_check.domain, - ), - Some(err.to_string()), - ), - )); - } - } - } - - Ok(()) - } - - #[named] - fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateless_service_error(target, self) - }) - } -} - -impl Pause for RouterScw { - #[named] - fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); 
- Ok(()) - } -} - -impl Delete for RouterScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - delete_router(target, self, event_details) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - delete_router(target, self, event_details) - } -} diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index b3111f51..ecf8b2e9 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -157,7 +157,7 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait Router: StatelessService + Listen + Helm { +pub trait IRouter: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; fn has_custom_domains(&self) -> bool; fn check_domains(&self, event_details: EventDetails, logger: &dyn Logger) -> Result<(), EngineError> { @@ -524,15 +524,6 @@ pub fn scale_down_application( }) } -pub fn delete_router(target: &DeploymentTarget, service: &T, event_details: EventDetails) -> Result<(), EngineError> -where - T: Router, -{ - send_progress_on_long_task(service, crate::cloud_provider::service::Action::Delete, || { - delete_stateless_service(target, service, event_details.clone()) - }) -} - pub fn delete_stateless_service( target: &DeploymentTarget, service: &T, diff --git a/src/io_models.rs b/src/io_models.rs index e9410a96..249df2cc 100644 --- 
a/src/io_models.rs +++ b/src/io_models.rs @@ -18,19 +18,16 @@ use crate::cloud_provider::aws::databases::mongodb::MongoDbAws; use crate::cloud_provider::aws::databases::mysql::MySQLAws; use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; use crate::cloud_provider::aws::databases::redis::RedisAws; -use crate::cloud_provider::aws::router::RouterAws; use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; use crate::cloud_provider::digitalocean::databases::redis::RedisDo; -use crate::cloud_provider::digitalocean::router::RouterDo; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; use crate::cloud_provider::scaleway::databases::redis::RedisScw; -use crate::cloud_provider::scaleway::router::RouterScw; -use crate::cloud_provider::service::DatabaseOptions; +use crate::cloud_provider::service::{DatabaseOptions, IRouter}; use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; @@ -39,9 +36,10 @@ use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; use crate::models::application::{ApplicationError, IApplication}; -use crate::models::aws::{AwsAppExtraSettings, AwsStorageType}; -use crate::models::digital_ocean::{DoAppExtraSettings, DoStorageType}; -use crate::models::scaleway::{ScwAppExtraSettings, ScwStorageType}; +use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; +use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; +use crate::models::router::RouterError; +use 
crate::models::scaleway::{ScwAppExtraSettings, ScwRouterExtraSettings, ScwStorageType}; use crate::models::types::{AWS, DO, SCW}; #[derive(Clone, Debug, PartialEq)] @@ -123,12 +121,16 @@ impl EnvironmentRequest { } } - //FIXME: remove those flatten as it hide errors regarding conversion to model data type - let routers = self - .routers - .iter() - .filter_map(|x| x.to_router_domain(context, cloud_provider, logger.clone())) - .collect::>(); + let mut routers = Vec::with_capacity(self.routers.len()); + for router in &self.routers { + match router.to_router_domain(context, cloud_provider, logger.clone()) { + Ok(router) => routers.push(router), + Err(err) => { + //FIXME: propagate the correct Error + return Err(ApplicationError::InvalidConfig(format!("{}", err))); + } + } + } let databases = self .databases @@ -507,7 +509,7 @@ impl Router { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Result, RouterError> { let custom_domains = self .custom_domains .iter() @@ -530,7 +532,7 @@ impl Router { match cloud_provider.kind() { CPKind::Aws => { - let router = Box::new(RouterAws::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -539,13 +541,14 @@ impl Router { custom_domains, routes, self.sticky_sessions_enabled, + AwsRouterExtraSettings {}, listeners, logger, - )); - Some(router) + )?); + Ok(router) } CPKind::Do => { - let router = Box::new(RouterDo::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -554,13 +557,14 @@ impl Router { custom_domains, routes, self.sticky_sessions_enabled, + DoRouterExtraSettings {}, listeners, logger, - )); - Some(router) + )?); + Ok(router) } CPKind::Scw => { - let router = Box::new(RouterScw::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -569,10 +573,11 @@ impl Router { custom_domains, routes, 
self.sticky_sessions_enabled, + ScwRouterExtraSettings {}, listeners, logger, - )); - Some(router) + )?); + Ok(router) } } } diff --git a/src/models/application.rs b/src/models/application.rs index 8eba144b..4de1860a 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -13,7 +13,7 @@ use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; use crate::io_models::{Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; -use crate::models::types::CloudProvider; +use crate::models::types::{CloudProvider, ToTeraContext}; use function_name::named; use std::marker::PhantomData; use tera::Context as TeraContext; @@ -204,10 +204,6 @@ impl Listen for Application { } } -pub(crate) trait ToTeraContext { - fn to_tera_context(&self, target: &DeploymentTarget) -> Result; -} - impl Service for Application where Application: ToTeraContext, diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs index 1d21f284..85986e74 100644 --- a/src/models/aws/application.rs +++ b/src/models/aws/application.rs @@ -5,9 +5,9 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::{Application, ToTeraContext}; +use crate::models::application::Application; use crate::models::aws::AwsStorageType; -use crate::models::types::AWS; +use crate::models::types::{ToTeraContext, AWS}; use tera::Context as TeraContext; impl ToTeraContext for Application { diff --git a/src/models/aws/mod.rs b/src/models/aws/mod.rs index 2bbfbc4f..e72d16ea 100644 --- a/src/models/aws/mod.rs +++ b/src/models/aws/mod.rs @@ -1,4 +1,5 @@ pub mod application; +pub mod router; use crate::models::types::CloudProvider; use crate::models::types::AWS; diff --git a/src/models/aws/router.rs b/src/models/aws/router.rs new file mode 100644 index 00000000..8964c4f9 --- 
/dev/null +++ b/src/models/aws/router.rs @@ -0,0 +1,11 @@ +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::models::router::Router; +use crate::models::types::{ToTeraContext, AWS}; +use tera::Context as TeraContext; + +impl ToTeraContext for Router { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.default_tera_context(target) + } +} diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs index 5a2e7a61..664c8bde 100644 --- a/src/models/digital_ocean/application.rs +++ b/src/models/digital_ocean/application.rs @@ -5,9 +5,9 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::{Application, ToTeraContext}; +use crate::models::application::Application; use crate::models::digital_ocean::DoStorageType; -use crate::models::types::DO; +use crate::models::types::{ToTeraContext, DO}; use tera::Context as TeraContext; impl ToTeraContext for Application { diff --git a/src/models/digital_ocean/mod.rs b/src/models/digital_ocean/mod.rs index 30c11461..e2a3d062 100644 --- a/src/models/digital_ocean/mod.rs +++ b/src/models/digital_ocean/mod.rs @@ -1,4 +1,5 @@ mod application; +mod router; use crate::errors::CommandError; use crate::models::types::CloudProvider; diff --git a/src/models/digital_ocean/router.rs b/src/models/digital_ocean/router.rs new file mode 100644 index 00000000..a978c7c7 --- /dev/null +++ b/src/models/digital_ocean/router.rs @@ -0,0 +1,19 @@ +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::models::router::Router; +use crate::models::types::{ToTeraContext, DO}; +use tera::Context as TeraContext; + +impl ToTeraContext for Router { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let mut context = self.default_tera_context(target)?; + 
context.insert("doks_cluster_id", target.kubernetes.id()); + if let Some(domain) = self.custom_domains.first() { + // https://github.com/digitalocean/digitalocean-cloud-controller-manager/issues/291 + // Can only manage 1 host at a time on an DO load balancer + context.insert("custom_domain_name", domain.domain.as_str()); + } + + Ok(context) + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs index 8db33ab5..f0b21d4f 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -1,5 +1,6 @@ pub mod application; pub mod aws; pub mod digital_ocean; +pub mod router; pub mod scaleway; pub mod types; diff --git a/src/cloud_provider/aws/router.rs b/src/models/router.rs similarity index 74% rename from src/cloud_provider/aws/router.rs rename to src/models/router.rs index 769180f4..829f0053 100644 --- a/src/cloud_provider/aws/router.rs +++ b/src/models/router.rs @@ -1,10 +1,8 @@ -use tera::Context as TeraContext; - use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ - default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create, - Delete, Helm, Pause, Router as RRouter, Router, Service, ServiceType, StatelessService, + default_tera_context, delete_stateless_service, deploy_stateless_service_error, send_progress_on_long_task, Action, + Create, Delete, Helm, IRouter, Pause, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -14,22 +12,35 @@ use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; -use ::function_name::named; +use crate::models::types::CloudProvider; +use crate::models::types::ToTeraContext; 
+use function_name::named; +use std::borrow::Borrow; +use std::marker::PhantomData; +use tera::Context as TeraContext; -pub struct RouterAws { - context: Context, - id: String, - name: String, - action: Action, - default_domain: String, - custom_domains: Vec, - sticky_sessions_enabled: bool, - routes: Vec, - listeners: Listeners, - logger: Box, +#[derive(thiserror::Error, Debug)] +pub enum RouterError { + #[error("Router invalid configuration: {0}")] + InvalidConfig(String), } -impl RouterAws { +pub struct Router { + _marker: PhantomData, + pub(crate) context: Context, + pub(crate) id: String, + pub(crate) action: Action, + pub(crate) name: String, + pub(crate) default_domain: String, + pub(crate) custom_domains: Vec, + pub(crate) sticky_sessions_enabled: bool, + pub(crate) routes: Vec, + pub(crate) listeners: Listeners, + pub(crate) logger: Box, + pub(crate) _extra_settings: T::RouterExtraSettings, +} + +impl Router { pub fn new( context: Context, id: &str, @@ -39,10 +50,12 @@ impl RouterAws { custom_domains: Vec, routes: Vec, sticky_sessions_enabled: bool, + extra_settings: T::RouterExtraSettings, listeners: Listeners, logger: Box, - ) -> Self { - RouterAws { + ) -> Result { + Ok(Self { + _marker: PhantomData, context, id: id.to_string(), name: name.to_string(), @@ -53,80 +66,18 @@ impl RouterAws { routes, listeners, logger, - } + _extra_settings: extra_settings, + }) } - fn cloud_provider_name(&self) -> &str { - "aws" + fn selector(&self) -> Option { + Some(format!("routerId={}", self.id)) } - fn struct_name(&self) -> &str { - "router" - } -} - -impl Service for RouterAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Router - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("router", self.name()) - } - - fn version(&self) -> String { - "1.0".to_string() - } - - fn action(&self) -> 
&Action { - &self.action - } - - fn private_port(&self) -> Option { - None - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - "1".to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - 1 - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - false - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { + pub(crate) fn default_tera_context(&self, target: &DeploymentTarget) -> Result + where + Self: Service, + { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; let environment = target.environment; @@ -212,8 +163,8 @@ impl Service for RouterAws { let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str()); - let tls_domain = format!("*.{}", kubernetes.dns_provider().domain()); - context.insert("router_tls_domain", tls_domain.as_str()); + let tls_domain = kubernetes.dns_provider().domain().wildcarded(); + context.insert("router_tls_domain", tls_domain.to_string().as_str()); context.insert("router_default_domain", self.default_domain.as_str()); context.insert("router_default_domain_hash", router_default_domain_hash.as_str()); context.insert("custom_domains", &custom_domain_data_templates); @@ -232,55 +183,15 @@ impl Service for RouterAws { Ok(context) } +} - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("routerId={}", self.id)) +impl ToTransmitter for Router { + fn to_transmitter(&self) -> Transmitter { + Transmitter::Router(self.id.to_string(), self.name.to_string()) } } -impl Router for RouterAws { - fn domains(&self) -> Vec<&str> { - let mut _domains = vec![self.default_domain.as_str()]; - - for domain in &self.custom_domains { - _domains.push(domain.domain.as_str()); - } - 
- _domains - } - - fn has_custom_domains(&self) -> bool { - !self.custom_domains.is_empty() - } -} - -impl Helm for RouterAws { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("router-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/charts/ingress-nginx", self.context().lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/aws/chart_values/nginx-ingress", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - String::new() - } -} - -impl Listen for RouterAws { +impl Listen for Router { fn listeners(&self) -> &Listeners { &self.listeners } @@ -290,25 +201,119 @@ impl Listen for RouterAws { } } -impl StatelessService for RouterAws { - fn as_stateless_service(&self) -> &dyn StatelessService { - self +impl Helm for Router { + fn helm_selector(&self) -> Option { + self.selector() + } + + fn helm_release_name(&self) -> String { + crate::string::cut(format!("router-{}", self.id), 50) + } + + fn helm_chart_dir(&self) -> String { + format!("{}/common/charts/ingress-nginx", self.context.lib_root_dir()) + } + + fn helm_chart_values_dir(&self) -> String { + format!( + "{}/{}/chart_values/nginx-ingress", + self.context.lib_root_dir(), + T::helm_directory_name() + ) + } + + fn helm_chart_external_name_service_dir(&self) -> String { + String::new() } } -impl ToTransmitter for RouterAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Router(self.id().to_string(), self.name().to_string()) +impl Service for Router +where + Router: ToTeraContext, +{ + fn context(&self) -> &Context { + &self.context + } + + fn service_type(&self) -> ServiceType { + ServiceType::Router + } + + fn id(&self) -> &str { + &self.id + } + + fn name(&self) -> &str { + &self.name + } + + fn sanitized_name(&self) -> String { + sanitize_name("router", self.id()) + } + + fn version(&self) -> String { + 
"1.0".to_string() + } + + fn action(&self) -> &Action { + &self.action + } + + fn private_port(&self) -> Option { + None + } + + fn start_timeout(&self) -> Timeout { + Timeout::Default + } + + fn total_cpus(&self) -> String { + "1".to_string() + } + + fn cpu_burst(&self) -> String { + "1".to_string() + } + + fn total_ram_in_mib(&self) -> u32 { + 1 + } + + fn min_instances(&self) -> u32 { + 1 + } + + fn max_instances(&self) -> u32 { + 1 + } + + fn publicly_accessible(&self) -> bool { + false + } + + fn tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context(target) + } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn selector(&self) -> Option { + self.selector() } } -impl Create for RouterAws { +impl Create for Router +where + Router: Service, +{ #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details.clone(), @@ -325,7 +330,11 @@ impl Create for RouterAws { // the nginx-ingress must be available to get the external dns target if necessary let context = self.tera_context(target)?; - let from_dir = format!("{}/aws/charts/q-ingress-tls", self.context.lib_root_dir()); + let from_dir = format!( + "{}/{}/charts/q-ingress-tls", + self.context.lib_root_dir(), + T::helm_directory_name() + ); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) { @@ -361,8 +370,17 @@ impl Create for RouterAws { .map_err(|e| EngineError::new_helm_error(event_details.clone(), e)) } + #[named] fn on_create_check(&self) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + T::short_name(), + "router", + function_name!(), + self.name(), + 
event_details.clone(), + self.logger(), + ); // check non custom domains self.check_domains(event_details.clone(), self.logger())?; @@ -376,7 +394,7 @@ impl Create for RouterAws { self.context.execution_id(), ) { Ok(cname) if cname.trim_end_matches('.') == domain_to_check.target_domain.trim_end_matches('.') => { - continue + continue; } Ok(err) | Err(err) => { // TODO(benjaminch): Handle better this one via a proper error eventually @@ -401,8 +419,8 @@ impl Create for RouterAws { fn on_create_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details, @@ -415,13 +433,16 @@ impl Create for RouterAws { } } -impl Pause for RouterAws { +impl Pause for Router +where + Router: Service, +{ #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details, @@ -430,7 +451,18 @@ impl Pause for RouterAws { Ok(()) } + #[named] fn on_pause_check(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + T::short_name(), + "router", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } @@ -438,8 +470,8 @@ impl Pause for RouterAws { fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details, @@ -449,22 +481,39 @@ impl Pause for RouterAws 
{ } } -impl Delete for RouterAws { +impl Delete for Router +where + Router: Service, +{ #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details.clone(), self.logger(), ); - delete_router(target, self, event_details) + + send_progress_on_long_task(self, Action::Delete, || { + delete_stateless_service(target, self, event_details.clone()) + }) } + #[named] fn on_delete_check(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + T::short_name(), + "router", + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } @@ -472,13 +521,45 @@ impl Delete for RouterAws { fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); print_action( - self.cloud_provider_name(), - self.struct_name(), + T::short_name(), + "router", function_name!(), self.name(), event_details.clone(), self.logger(), ); - delete_router(target, self, event_details) + + send_progress_on_long_task(self, Action::Delete, || { + delete_stateless_service(target, self, event_details.clone()) + }) + } +} + +impl StatelessService for Router +where + Router: Service, +{ + fn as_stateless_service(&self) -> &dyn StatelessService { + self + } +} + +impl IRouter for Router +where + Router: Service, +{ + fn domains(&self) -> Vec<&str> { + let mut domains = Vec::with_capacity(1 + self.custom_domains.len()); + domains.push(self.default_domain.as_str()); + + for domain in &self.custom_domains { + domains.push(domain.domain.as_str()); + } + + domains + } + + fn has_custom_domains(&self) -> bool { + !self.custom_domains.is_empty() } } diff 
--git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs index 2b14300f..e43883dd 100644 --- a/src/models/scaleway/application.rs +++ b/src/models/scaleway/application.rs @@ -5,9 +5,9 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::{Application, ToTeraContext}; +use crate::models::application::Application; use crate::models::scaleway::ScwStorageType; -use crate::models::types::SCW; +use crate::models::types::{ToTeraContext, SCW}; use tera::Context as TeraContext; impl ToTeraContext for Application { diff --git a/src/models/scaleway/mod.rs b/src/models/scaleway/mod.rs index ab73a293..b9067fc7 100644 --- a/src/models/scaleway/mod.rs +++ b/src/models/scaleway/mod.rs @@ -1,4 +1,5 @@ mod application; +mod router; use crate::errors::CommandError; use crate::models::types::CloudProvider; diff --git a/src/models/scaleway/router.rs b/src/models/scaleway/router.rs new file mode 100644 index 00000000..12828657 --- /dev/null +++ b/src/models/scaleway/router.rs @@ -0,0 +1,11 @@ +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use crate::models::router::Router; +use crate::models::types::{ToTeraContext, SCW}; +use tera::Context as TeraContext; + +impl ToTeraContext for Router { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.default_tera_context(target) + } +} diff --git a/src/models/types.rs b/src/models/types.rs index 75fef0ea..59cb6b23 100644 --- a/src/models/types.rs +++ b/src/models/types.rs @@ -1,3 +1,7 @@ +use crate::cloud_provider::DeploymentTarget; +use crate::errors::EngineError; +use tera::Context as TeraContext; + // Those types are just marker types that are use to tag our struct/object model pub struct AWS {} pub struct DO {} @@ -17,3 +21,7 @@ pub trait CloudProvider { fn registry_full_name() -> &'static str; fn helm_directory_name() -> 
&'static str; } + +pub(crate) trait ToTeraContext { + fn to_tera_context(&self, target: &DeploymentTarget) -> Result; +} diff --git a/src/transaction.rs b/src/transaction.rs index 785c3b6e..95470bfc 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -194,9 +194,9 @@ impl<'a> Transaction<'a> { // logging let image_name = app.get_build().image.full_image_name_with_tag(); let msg = match &build_result { - Ok(_) => format!("✅ Container {} is built", &image_name), - Err(BuildError::Aborted(_)) => format!("🚫 Container {} build has been canceled", &image_name), - Err(err) => format!("❌ Container {} failed to be build: {}", &image_name, err), + Ok(_) => format!("✅ Container image {} is built and ready to use", &image_name), + Err(BuildError::Aborted(_)) => format!("🚫 Container image {} build has been canceled", &image_name), + Err(err) => format!("❌ Container image {} failed to be build: {}", &image_name, err), }; let progress_info = ProgressInfo::new( From f7136d07631d220c95b8d6232676ca3a9c0b2b4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 29 Mar 2022 15:52:32 +0200 Subject: [PATCH 005/122] Rename Application and Router traits/structs (#671) --- src/cloud_provider/environment.rs | 12 ++++----- src/cloud_provider/service.rs | 2 +- src/io_models.rs | 20 +++++++------- src/models/application.rs | 36 ++++++++++++------------- src/models/aws/application.rs | 4 +-- src/models/aws/router.rs | 4 +-- src/models/digital_ocean/application.rs | 4 +-- src/models/digital_ocean/router.rs | 4 +-- src/models/router.rs | 36 ++++++++++++------------- src/models/scaleway/application.rs | 4 +-- src/models/scaleway/router.rs | 4 +-- src/transaction.rs | 4 +-- 12 files changed, 67 insertions(+), 67 deletions(-) diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 2fb5411b..ec791a04 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,5 +1,5 @@ -use 
crate::cloud_provider::service::{Action, Database, IRouter, StatefulService, StatelessService}; -use crate::models::application::IApplication; +use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; +use crate::models::application::Application; pub struct Environment { namespace: String, @@ -8,8 +8,8 @@ pub struct Environment { pub owner_id: String, pub organization_id: String, pub action: Action, - pub applications: Vec>, - pub routers: Vec>, + pub applications: Vec>, + pub routers: Vec>, pub databases: Vec>, } @@ -20,8 +20,8 @@ impl Environment { owner_id: &str, organization_id: &str, action: Action, - applications: Vec>, - routers: Vec>, + applications: Vec>, + routers: Vec>, databases: Vec>, ) -> Self { Environment { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index ecf8b2e9..ad1b1e1f 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -157,7 +157,7 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait IRouter: StatelessService + Listen + Helm { +pub trait Router: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; fn has_custom_domains(&self) -> bool; fn check_domains(&self, event_details: EventDetails, logger: &dyn Logger) -> Result<(), EngineError> { diff --git a/src/io_models.rs b/src/io_models.rs index 249df2cc..572d61f4 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -27,7 +27,7 @@ use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; use crate::cloud_provider::scaleway::databases::redis::RedisScw; -use crate::cloud_provider::service::{DatabaseOptions, IRouter}; +use crate::cloud_provider::service::{DatabaseOptions, Router}; use crate::cloud_provider::utilities::VersionsNumber; use 
crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; @@ -35,7 +35,7 @@ use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; -use crate::models::application::{ApplicationError, IApplication}; +use crate::models::application::{Application, ApplicationError}; use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; use crate::models::router::RouterError; @@ -226,12 +226,12 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, ApplicationError> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Ok(Box::new(models::application::Application::::new( + CPKind::Aws => Ok(Box::new(models::application::ApplicationImpl::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -250,7 +250,7 @@ impl Application { listeners, logger.clone(), )?)), - CPKind::Do => Ok(Box::new(models::application::Application::::new( + CPKind::Do => Ok(Box::new(models::application::ApplicationImpl::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -269,7 +269,7 @@ impl Application { listeners, logger.clone(), )?)), - CPKind::Scw => Ok(Box::new(models::application::Application::::new( + CPKind::Scw => Ok(Box::new(models::application::ApplicationImpl::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -509,7 +509,7 @@ impl Router { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, RouterError> { + ) -> Result, RouterError> { let custom_domains = self .custom_domains .iter() @@ -532,7 +532,7 @@ impl Router { match cloud_provider.kind() { CPKind::Aws => { - let 
router = Box::new(models::router::Router::::new( + let router = Box::new(models::router::RouterImpl::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -548,7 +548,7 @@ impl Router { Ok(router) } CPKind::Do => { - let router = Box::new(models::router::Router::::new( + let router = Box::new(models::router::RouterImpl::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -564,7 +564,7 @@ impl Router { Ok(router) } CPKind::Scw => { - let router = Box::new(models::router::Router::::new( + let router = Box::new(models::router::RouterImpl::::new( context.clone(), self.id.as_str(), self.name.as_str(), diff --git a/src/models/application.rs b/src/models/application.rs index 4de1860a..f9574a98 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -24,7 +24,7 @@ pub enum ApplicationError { InvalidConfig(String), } -pub struct Application { +pub struct ApplicationImpl { _marker: PhantomData, pub(crate) context: Context, pub(crate) id: String, @@ -46,7 +46,7 @@ pub struct Application { } // Here we define the common behavior among all providers -impl Application { +impl ApplicationImpl { pub fn new( context: Context, id: &str, @@ -188,13 +188,13 @@ impl Application { } // Traits implementations -impl ToTransmitter for Application { +impl ToTransmitter for ApplicationImpl { fn to_transmitter(&self) -> Transmitter { Transmitter::Application(self.id.to_string(), self.name.to_string()) } } -impl Listen for Application { +impl Listen for ApplicationImpl { fn listeners(&self) -> &Listeners { &self.listeners } @@ -204,9 +204,9 @@ impl Listen for Application { } } -impl Service for Application +impl Service for ApplicationImpl where - Application: ToTeraContext, + ApplicationImpl: ToTeraContext, { fn context(&self) -> &Context { self.context() @@ -281,7 +281,7 @@ where } } -impl Helm for Application { +impl Helm for ApplicationImpl { fn helm_selector(&self) -> Option { self.selector() } @@ -307,9 +307,9 @@ impl Helm for Application { } } 
-impl Create for Application +impl Create for ApplicationImpl where - Application: Service, + ApplicationImpl: Service, { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -345,9 +345,9 @@ where } } -impl Pause for Application +impl Pause for ApplicationImpl where - Application: Service, + ApplicationImpl: Service, { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -386,9 +386,9 @@ where } } -impl Delete for Application +impl Delete for ApplicationImpl where - Application: Service, + ApplicationImpl: Service, { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -429,23 +429,23 @@ where } } -impl StatelessService for Application +impl StatelessService for ApplicationImpl where - Application: Service, + ApplicationImpl: Service, { fn as_stateless_service(&self) -> &dyn StatelessService { self } } -pub trait IApplication: StatelessService { +pub trait Application: StatelessService { fn get_build(&self) -> &Build; fn get_build_mut(&mut self) -> &mut Build; } -impl IApplication for Application +impl Application for ApplicationImpl where - Application: Service, + ApplicationImpl: Service, { fn get_build(&self) -> &Build { self.build() diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs index 85986e74..31cddb1a 100644 --- a/src/models/aws/application.rs +++ b/src/models/aws/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::Application; +use crate::models::application::ApplicationImpl; use crate::models::aws::AwsStorageType; use crate::models::types::{ToTeraContext, AWS}; use tera::Context as TeraContext; -impl ToTeraContext for Application { +impl ToTeraContext for ApplicationImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details 
= self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let mut context = default_tera_context(self, target.kubernetes, target.environment); diff --git a/src/models/aws/router.rs b/src/models/aws/router.rs index 8964c4f9..39cd3bac 100644 --- a/src/models/aws/router.rs +++ b/src/models/aws/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::Router; +use crate::models::router::RouterImpl; use crate::models::types::{ToTeraContext, AWS}; use tera::Context as TeraContext; -impl ToTeraContext for Router { +impl ToTeraContext for RouterImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { self.default_tera_context(target) } diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs index 664c8bde..873ef348 100644 --- a/src/models/digital_ocean/application.rs +++ b/src/models/digital_ocean/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::Application; +use crate::models::application::ApplicationImpl; use crate::models::digital_ocean::DoStorageType; use crate::models::types::{ToTeraContext, DO}; use tera::Context as TeraContext; -impl ToTeraContext for Application { +impl ToTeraContext for ApplicationImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; diff --git a/src/models/digital_ocean/router.rs b/src/models/digital_ocean/router.rs index a978c7c7..6000e6dc 100644 --- a/src/models/digital_ocean/router.rs +++ b/src/models/digital_ocean/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::Router; +use 
crate::models::router::RouterImpl; use crate::models::types::{ToTeraContext, DO}; use tera::Context as TeraContext; -impl ToTeraContext for Router { +impl ToTeraContext for RouterImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let mut context = self.default_tera_context(target)?; context.insert("doks_cluster_id", target.kubernetes.id()); diff --git a/src/models/router.rs b/src/models/router.rs index 829f0053..79281d88 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -2,7 +2,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, send_progress_on_long_task, Action, - Create, Delete, Helm, IRouter, Pause, Service, ServiceType, StatelessService, + Create, Delete, Helm, Pause, Router, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -25,7 +25,7 @@ pub enum RouterError { InvalidConfig(String), } -pub struct Router { +pub struct RouterImpl { _marker: PhantomData, pub(crate) context: Context, pub(crate) id: String, @@ -40,7 +40,7 @@ pub struct Router { pub(crate) _extra_settings: T::RouterExtraSettings, } -impl Router { +impl RouterImpl { pub fn new( context: Context, id: &str, @@ -185,13 +185,13 @@ impl Router { } } -impl ToTransmitter for Router { +impl ToTransmitter for RouterImpl { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id.to_string(), self.name.to_string()) } } -impl Listen for Router { +impl Listen for RouterImpl { fn listeners(&self) -> &Listeners { &self.listeners } @@ -201,7 +201,7 @@ impl Listen for Router { } } -impl Helm for Router { +impl Helm for RouterImpl { fn helm_selector(&self) -> Option { self.selector() } @@ -227,9 +227,9 @@ impl Helm for Router { } 
} -impl Service for Router +impl Service for RouterImpl where - Router: ToTeraContext, + RouterImpl: ToTeraContext, { fn context(&self) -> &Context { &self.context @@ -304,9 +304,9 @@ where } } -impl Create for Router +impl Create for RouterImpl where - Router: Service, + RouterImpl: Service, { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -433,9 +433,9 @@ where } } -impl Pause for Router +impl Pause for RouterImpl where - Router: Service, + RouterImpl: Service, { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { @@ -481,9 +481,9 @@ where } } -impl Delete for Router +impl Delete for RouterImpl where - Router: Service, + RouterImpl: Service, { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -535,18 +535,18 @@ where } } -impl StatelessService for Router +impl StatelessService for RouterImpl where - Router: Service, + RouterImpl: Service, { fn as_stateless_service(&self) -> &dyn StatelessService { self } } -impl IRouter for Router +impl Router for RouterImpl where - Router: Service, + RouterImpl: Service, { fn domains(&self) -> Vec<&str> { let mut domains = Vec::with_capacity(1 + self.custom_domains.len()); diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs index e43883dd..f75094f5 100644 --- a/src/models/scaleway/application.rs +++ b/src/models/scaleway/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::Application; +use crate::models::application::ApplicationImpl; use crate::models::scaleway::ScwStorageType; use crate::models::types::{ToTeraContext, SCW}; use tera::Context as TeraContext; -impl ToTeraContext for Application { +impl ToTeraContext for ApplicationImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let 
event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; diff --git a/src/models/scaleway/router.rs b/src/models/scaleway/router.rs index 12828657..787dd6dc 100644 --- a/src/models/scaleway/router.rs +++ b/src/models/scaleway/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::Router; +use crate::models::router::RouterImpl; use crate::models::types::{ToTeraContext, SCW}; use tera::Context as TeraContext; -impl ToTeraContext for Router { +impl ToTeraContext for RouterImpl { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { self.default_tera_context(target) } diff --git a/src/transaction.rs b/src/transaction.rs index 95470bfc..846ce439 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -14,7 +14,7 @@ use crate::io_models::{ EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; use crate::logger::Logger; -use crate::models::application::IApplication; +use crate::models::application::Application; pub struct Transaction<'a> { engine: &'a EngineConfig, @@ -131,7 +131,7 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - applications: &mut [Box], + applications: &mut [Box], option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications From 4d2e2bd2238ed471f297a7f192deab9f3967667c Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 29 Mar 2022 16:19:38 +0200 Subject: [PATCH 006/122] Revert "Rename Application and Router traits/structs (#671)" This reverts commit f7136d07631d220c95b8d6232676ca3a9c0b2b4c. 
--- src/cloud_provider/environment.rs | 12 ++++----- src/cloud_provider/service.rs | 2 +- src/io_models.rs | 20 +++++++------- src/models/application.rs | 36 ++++++++++++------------- src/models/aws/application.rs | 4 +-- src/models/aws/router.rs | 4 +-- src/models/digital_ocean/application.rs | 4 +-- src/models/digital_ocean/router.rs | 4 +-- src/models/router.rs | 36 ++++++++++++------------- src/models/scaleway/application.rs | 4 +-- src/models/scaleway/router.rs | 4 +-- src/transaction.rs | 4 +-- 12 files changed, 67 insertions(+), 67 deletions(-) diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index ec791a04..2fb5411b 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,5 +1,5 @@ -use crate::cloud_provider::service::{Action, Database, Router, StatefulService, StatelessService}; -use crate::models::application::Application; +use crate::cloud_provider::service::{Action, Database, IRouter, StatefulService, StatelessService}; +use crate::models::application::IApplication; pub struct Environment { namespace: String, @@ -8,8 +8,8 @@ pub struct Environment { pub owner_id: String, pub organization_id: String, pub action: Action, - pub applications: Vec>, - pub routers: Vec>, + pub applications: Vec>, + pub routers: Vec>, pub databases: Vec>, } @@ -20,8 +20,8 @@ impl Environment { owner_id: &str, organization_id: &str, action: Action, - applications: Vec>, - routers: Vec>, + applications: Vec>, + routers: Vec>, databases: Vec>, ) -> Self { Environment { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index ad1b1e1f..ecf8b2e9 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -157,7 +157,7 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn is_managed_service(&self) -> bool; } -pub trait Router: StatelessService + Listen + Helm { +pub trait IRouter: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; fn 
has_custom_domains(&self) -> bool; fn check_domains(&self, event_details: EventDetails, logger: &dyn Logger) -> Result<(), EngineError> { diff --git a/src/io_models.rs b/src/io_models.rs index 572d61f4..249df2cc 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -27,7 +27,7 @@ use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; use crate::cloud_provider::scaleway::databases::redis::RedisScw; -use crate::cloud_provider::service::{DatabaseOptions, Router}; +use crate::cloud_provider::service::{DatabaseOptions, IRouter}; use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; @@ -35,7 +35,7 @@ use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; -use crate::models::application::{Application, ApplicationError}; +use crate::models::application::{ApplicationError, IApplication}; use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; use crate::models::router::RouterError; @@ -226,12 +226,12 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, ApplicationError> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); match cloud_provider.kind() { - CPKind::Aws => Ok(Box::new(models::application::ApplicationImpl::::new( + CPKind::Aws => Ok(Box::new(models::application::Application::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -250,7 +250,7 @@ impl Application { listeners, logger.clone(), )?)), - CPKind::Do => 
Ok(Box::new(models::application::ApplicationImpl::::new( + CPKind::Do => Ok(Box::new(models::application::Application::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -269,7 +269,7 @@ impl Application { listeners, logger.clone(), )?)), - CPKind::Scw => Ok(Box::new(models::application::ApplicationImpl::::new( + CPKind::Scw => Ok(Box::new(models::application::Application::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), @@ -509,7 +509,7 @@ impl Router { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, RouterError> { + ) -> Result, RouterError> { let custom_domains = self .custom_domains .iter() @@ -532,7 +532,7 @@ impl Router { match cloud_provider.kind() { CPKind::Aws => { - let router = Box::new(models::router::RouterImpl::::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -548,7 +548,7 @@ impl Router { Ok(router) } CPKind::Do => { - let router = Box::new(models::router::RouterImpl::::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), @@ -564,7 +564,7 @@ impl Router { Ok(router) } CPKind::Scw => { - let router = Box::new(models::router::RouterImpl::::new( + let router = Box::new(models::router::Router::::new( context.clone(), self.id.as_str(), self.name.as_str(), diff --git a/src/models/application.rs b/src/models/application.rs index f9574a98..4de1860a 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -24,7 +24,7 @@ pub enum ApplicationError { InvalidConfig(String), } -pub struct ApplicationImpl { +pub struct Application { _marker: PhantomData, pub(crate) context: Context, pub(crate) id: String, @@ -46,7 +46,7 @@ pub struct ApplicationImpl { } // Here we define the common behavior among all providers -impl ApplicationImpl { +impl Application { pub fn new( context: Context, id: &str, @@ -188,13 +188,13 @@ impl 
ApplicationImpl { } // Traits implementations -impl ToTransmitter for ApplicationImpl { +impl ToTransmitter for Application { fn to_transmitter(&self) -> Transmitter { Transmitter::Application(self.id.to_string(), self.name.to_string()) } } -impl Listen for ApplicationImpl { +impl Listen for Application { fn listeners(&self) -> &Listeners { &self.listeners } @@ -204,9 +204,9 @@ impl Listen for ApplicationImpl { } } -impl Service for ApplicationImpl +impl Service for Application where - ApplicationImpl: ToTeraContext, + Application: ToTeraContext, { fn context(&self) -> &Context { self.context() @@ -281,7 +281,7 @@ where } } -impl Helm for ApplicationImpl { +impl Helm for Application { fn helm_selector(&self) -> Option { self.selector() } @@ -307,9 +307,9 @@ impl Helm for ApplicationImpl { } } -impl Create for ApplicationImpl +impl Create for Application where - ApplicationImpl: Service, + Application: Service, { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -345,9 +345,9 @@ where } } -impl Pause for ApplicationImpl +impl Pause for Application where - ApplicationImpl: Service, + Application: Service, { #[named] fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -386,9 +386,9 @@ where } } -impl Delete for ApplicationImpl +impl Delete for Application where - ApplicationImpl: Service, + Application: Service, { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -429,23 +429,23 @@ where } } -impl StatelessService for ApplicationImpl +impl StatelessService for Application where - ApplicationImpl: Service, + Application: Service, { fn as_stateless_service(&self) -> &dyn StatelessService { self } } -pub trait Application: StatelessService { +pub trait IApplication: StatelessService { fn get_build(&self) -> &Build; fn get_build_mut(&mut self) -> &mut Build; } -impl Application for ApplicationImpl +impl IApplication for Application where - ApplicationImpl: Service, + 
Application: Service, { fn get_build(&self) -> &Build { self.build() diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs index 31cddb1a..85986e74 100644 --- a/src/models/aws/application.rs +++ b/src/models/aws/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::ApplicationImpl; +use crate::models::application::Application; use crate::models::aws::AwsStorageType; use crate::models::types::{ToTeraContext, AWS}; use tera::Context as TeraContext; -impl ToTeraContext for ApplicationImpl { +impl ToTeraContext for Application { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let mut context = default_tera_context(self, target.kubernetes, target.environment); diff --git a/src/models/aws/router.rs b/src/models/aws/router.rs index 39cd3bac..8964c4f9 100644 --- a/src/models/aws/router.rs +++ b/src/models/aws/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::RouterImpl; +use crate::models::router::Router; use crate::models::types::{ToTeraContext, AWS}; use tera::Context as TeraContext; -impl ToTeraContext for RouterImpl { +impl ToTeraContext for Router { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { self.default_tera_context(target) } diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs index 873ef348..664c8bde 100644 --- a/src/models/digital_ocean/application.rs +++ b/src/models/digital_ocean/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use 
crate::models::application::ApplicationImpl; +use crate::models::application::Application; use crate::models::digital_ocean::DoStorageType; use crate::models::types::{ToTeraContext, DO}; use tera::Context as TeraContext; -impl ToTeraContext for ApplicationImpl { +impl ToTeraContext for Application { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; diff --git a/src/models/digital_ocean/router.rs b/src/models/digital_ocean/router.rs index 6000e6dc..a978c7c7 100644 --- a/src/models/digital_ocean/router.rs +++ b/src/models/digital_ocean/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::RouterImpl; +use crate::models::router::Router; use crate::models::types::{ToTeraContext, DO}; use tera::Context as TeraContext; -impl ToTeraContext for RouterImpl { +impl ToTeraContext for Router { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let mut context = self.default_tera_context(target)?; context.insert("doks_cluster_id", target.kubernetes.id()); diff --git a/src/models/router.rs b/src/models/router.rs index 79281d88..829f0053 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -2,7 +2,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, send_progress_on_long_task, Action, - Create, Delete, Helm, Pause, Router, Service, ServiceType, StatelessService, + Create, Delete, Helm, IRouter, Pause, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -25,7 +25,7 @@ pub enum RouterError { 
InvalidConfig(String), } -pub struct RouterImpl { +pub struct Router { _marker: PhantomData, pub(crate) context: Context, pub(crate) id: String, @@ -40,7 +40,7 @@ pub struct RouterImpl { pub(crate) _extra_settings: T::RouterExtraSettings, } -impl RouterImpl { +impl Router { pub fn new( context: Context, id: &str, @@ -185,13 +185,13 @@ impl RouterImpl { } } -impl ToTransmitter for RouterImpl { +impl ToTransmitter for Router { fn to_transmitter(&self) -> Transmitter { Transmitter::Router(self.id.to_string(), self.name.to_string()) } } -impl Listen for RouterImpl { +impl Listen for Router { fn listeners(&self) -> &Listeners { &self.listeners } @@ -201,7 +201,7 @@ impl Listen for RouterImpl { } } -impl Helm for RouterImpl { +impl Helm for Router { fn helm_selector(&self) -> Option { self.selector() } @@ -227,9 +227,9 @@ impl Helm for RouterImpl { } } -impl Service for RouterImpl +impl Service for Router where - RouterImpl: ToTeraContext, + Router: ToTeraContext, { fn context(&self) -> &Context { &self.context @@ -304,9 +304,9 @@ where } } -impl Create for RouterImpl +impl Create for Router where - RouterImpl: Service, + Router: Service, { #[named] fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -433,9 +433,9 @@ where } } -impl Pause for RouterImpl +impl Pause for Router where - RouterImpl: Service, + Router: Service, { #[named] fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { @@ -481,9 +481,9 @@ where } } -impl Delete for RouterImpl +impl Delete for Router where - RouterImpl: Service, + Router: Service, { #[named] fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { @@ -535,18 +535,18 @@ where } } -impl StatelessService for RouterImpl +impl StatelessService for Router where - RouterImpl: Service, + Router: Service, { fn as_stateless_service(&self) -> &dyn StatelessService { self } } -impl Router for RouterImpl +impl IRouter for Router where - RouterImpl: Service, + Router: Service, { fn 
domains(&self) -> Vec<&str> { let mut domains = Vec::with_capacity(1 + self.custom_domains.len()); diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs index f75094f5..e43883dd 100644 --- a/src/models/scaleway/application.rs +++ b/src/models/scaleway/application.rs @@ -5,12 +5,12 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; use crate::io_models::ListenersHelper; -use crate::models::application::ApplicationImpl; +use crate::models::application::Application; use crate::models::scaleway::ScwStorageType; use crate::models::types::{ToTeraContext, SCW}; use tera::Context as TeraContext; -impl ToTeraContext for ApplicationImpl { +impl ToTeraContext for Application { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; diff --git a/src/models/scaleway/router.rs b/src/models/scaleway/router.rs index 787dd6dc..12828657 100644 --- a/src/models/scaleway/router.rs +++ b/src/models/scaleway/router.rs @@ -1,10 +1,10 @@ use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; -use crate::models::router::RouterImpl; +use crate::models::router::Router; use crate::models::types::{ToTeraContext, SCW}; use tera::Context as TeraContext; -impl ToTeraContext for RouterImpl { +impl ToTeraContext for Router { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { self.default_tera_context(target) } diff --git a/src/transaction.rs b/src/transaction.rs index 846ce439..95470bfc 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -14,7 +14,7 @@ use crate::io_models::{ EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; use crate::logger::Logger; -use crate::models::application::Application; +use crate::models::application::IApplication; pub struct 
Transaction<'a> { engine: &'a EngineConfig, @@ -131,7 +131,7 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - applications: &mut [Box], + applications: &mut [Box], option: &DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications From 9f472f67f5611a7769d3a2d7168e7ef4978790af Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 29 Mar 2022 16:27:35 +0200 Subject: [PATCH 007/122] Rename to avoid conflict --- src/cloud_provider/environment.rs | 12 ++++++------ src/cloud_provider/service.rs | 2 +- src/io_models.rs | 8 ++++---- src/models/application.rs | 4 ++-- src/models/router.rs | 4 ++-- src/transaction.rs | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index 2fb5411b..f732aeca 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,5 +1,5 @@ -use crate::cloud_provider::service::{Action, Database, IRouter, StatefulService, StatelessService}; -use crate::models::application::IApplication; +use crate::cloud_provider::service::{Action, Database, RouterService, StatefulService, StatelessService}; +use crate::models::application::ApplicationService; pub struct Environment { namespace: String, @@ -8,8 +8,8 @@ pub struct Environment { pub owner_id: String, pub organization_id: String, pub action: Action, - pub applications: Vec>, - pub routers: Vec>, + pub applications: Vec>, + pub routers: Vec>, pub databases: Vec>, } @@ -20,8 +20,8 @@ impl Environment { owner_id: &str, organization_id: &str, action: Action, - applications: Vec>, - routers: Vec>, + applications: Vec>, + routers: Vec>, databases: Vec>, ) -> Self { Environment { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index ecf8b2e9..76d4ebc9 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -157,7 +157,7 @@ pub trait StatefulService: Service + Create + Pause + Delete { fn 
is_managed_service(&self) -> bool; } -pub trait IRouter: StatelessService + Listen + Helm { +pub trait RouterService: StatelessService + Listen + Helm { fn domains(&self) -> Vec<&str>; fn has_custom_domains(&self) -> bool; fn check_domains(&self, event_details: EventDetails, logger: &dyn Logger) -> Result<(), EngineError> { diff --git a/src/io_models.rs b/src/io_models.rs index 249df2cc..702348fd 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -27,7 +27,7 @@ use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; use crate::cloud_provider::scaleway::databases::redis::RedisScw; -use crate::cloud_provider::service::{DatabaseOptions, IRouter}; +use crate::cloud_provider::service::{DatabaseOptions, RouterService}; use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; @@ -35,7 +35,7 @@ use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; -use crate::models::application::{ApplicationError, IApplication}; +use crate::models::application::{ApplicationError, ApplicationService}; use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; use crate::models::router::RouterError; @@ -226,7 +226,7 @@ impl Application { build: Build, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, ApplicationError> { + ) -> Result, ApplicationError> { let environment_variables = to_environment_variable(&self.environment_vars); let listeners = cloud_provider.listeners().clone(); @@ -509,7 +509,7 @@ impl Router { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, RouterError> { + ) -> Result, RouterError> { let 
custom_domains = self .custom_domains .iter() diff --git a/src/models/application.rs b/src/models/application.rs index 4de1860a..296821bf 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -438,12 +438,12 @@ where } } -pub trait IApplication: StatelessService { +pub trait ApplicationService: StatelessService { fn get_build(&self) -> &Build; fn get_build_mut(&mut self) -> &mut Build; } -impl IApplication for Application +impl ApplicationService for Application where Application: Service, { diff --git a/src/models/router.rs b/src/models/router.rs index 829f0053..8514d8a2 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -2,7 +2,7 @@ use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate}; use crate::cloud_provider::service::{ default_tera_context, delete_stateless_service, deploy_stateless_service_error, send_progress_on_long_task, Action, - Create, Delete, Helm, IRouter, Pause, Service, ServiceType, StatelessService, + Create, Delete, Helm, Pause, RouterService, Service, ServiceType, StatelessService, }; use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; @@ -544,7 +544,7 @@ where } } -impl IRouter for Router +impl RouterService for Router where Router: Service, { diff --git a/src/transaction.rs b/src/transaction.rs index 95470bfc..c0cbe5a5 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -14,7 +14,7 @@ use crate::io_models::{ EnvironmentError, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, }; use crate::logger::Logger; -use crate::models::application::IApplication; +use crate::models::application::ApplicationService; pub struct Transaction<'a> { engine: &'a EngineConfig, @@ -131,7 +131,7 @@ impl<'a> Transaction<'a> { fn build_and_push_applications( &self, - applications: &mut [Box], + applications: &mut [Box], option: 
&DeploymentOption, ) -> Result<(), EngineError> { // do the same for applications From 7fdfc0218c867d9c618a11fc1e00e746ecdad8c6 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Wed, 30 Mar 2022 14:23:09 +0200 Subject: [PATCH 008/122] fix: avoid leaking details to legacy errors (#673) Ticket: ENG-1156 --- src/cloud_provider/aws/databases/mongodb.rs | 23 ++-- src/cloud_provider/aws/databases/mysql.rs | 23 ++-- .../aws/databases/postgresql.rs | 34 +++--- src/cloud_provider/aws/databases/redis.rs | 23 ++-- src/cloud_provider/aws/databases/utilities.rs | 9 +- src/cloud_provider/aws/kubernetes/mod.rs | 15 ++- .../digitalocean/kubernetes/doks_api.rs | 4 +- .../digitalocean/kubernetes/mod.rs | 30 +++-- src/cloud_provider/helm.rs | 9 +- src/cloud_provider/kubernetes.rs | 25 +++-- src/cloud_provider/scaleway/kubernetes/mod.rs | 15 ++- src/cloud_provider/service.rs | 7 +- src/cmd/helm.rs | 10 +- src/cmd/kubectl.rs | 6 +- src/cmd/terraform.rs | 15 ++- src/errors/io.rs | 4 +- src/errors/mod.rs | 101 ++++++++++++----- src/events/mod.rs | 104 ++++++++++++++++-- src/io_models.rs | 17 ++- test_utilities/src/utilities.rs | 2 +- 20 files changed, 306 insertions(+), 170 deletions(-) diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs index 386a0b88..9f8af5fc 100644 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ b/src/cloud_provider/aws/databases/mongodb.rs @@ -429,28 +429,23 @@ fn get_managed_mongodb_version(requested_version: String) -> Result Result Result Result String { mod tests_aws_databases_parameters { use crate::cloud_provider::aws::databases::utilities::get_parameter_group_from_version; use crate::cloud_provider::utilities::VersionsNumber; + use crate::errors::ErrorMessageVerbosity; use crate::io_models::DatabaseKind; use std::str::FromStr; @@ -49,9 +50,9 @@ mod tests_aws_databases_parameters { VersionsNumber::from_str("8").expect("error while trying to get version from str"), DatabaseKind::Mysql, ); - assert_eq!( - 
mysql_parameter_group.unwrap_err().message(), - "Can't determine the minor version, to select parameter group for Mysql version 8" - ); + assert!(mysql_parameter_group + .unwrap_err() + .message(ErrorMessageVerbosity::FullDetails) + .contains("Can't determine the minor version, to select parameter group for Mysql version 8")); } } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 09e93e87..3104f6f1 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -35,7 +35,7 @@ use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider; use crate::dns_provider::DnsProvider; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; use crate::io_models::{ @@ -763,7 +763,10 @@ impl EKS { .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), Err(err) => self.logger().log(EngineEvent::Warning( event_details, - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + EventMessage::new( + "Error trying to get kubernetes events".to_string(), + Some(err.message(ErrorMessageVerbosity::FullDetails)), + ), )), }; @@ -1011,7 +1014,7 @@ impl EKS { let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; self.logger().log(EngineEvent::Warning( event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message())), + EventMessage::new(safe_message.to_string(), Some(e.message(ErrorMessageVerbosity::FullDetails))), )); skip_kubernetes_step = true; @@ -1084,7 +1087,7 @@ impl EKS { )), )), Err(e) => { - if !(e.message().contains("not 
found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!( @@ -1104,7 +1107,7 @@ impl EKS { ); self.logger().log(EngineEvent::Warning( event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), + EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), )); } } @@ -1182,7 +1185,7 @@ impl EKS { EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), )), Err(e) => { - if !(e.message().contains("not found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 3f393de8..a1c2a033 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -88,7 +88,7 @@ pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Res Ok(clusters) => Ok(clusters), Err(e) => Err(CommandError::new_from_safe_message(e.to_string())), }, - Err(e) => Err(CommandError::new_from_safe_message(e.message())), + Err(e) => Err(e), }; let clusters_copy = clusters.expect("Unable to list clusters").kubernetes_clusters; @@ -108,7 +108,7 @@ pub fn get_do_kubeconfig_by_cluster_name(token: &str, cluster_name: &str) -> Res } Ok(Some(kubeconfig)) } - Err(e) => Err(CommandError::new_from_safe_message(e.message())), + Err(e) => Err(e), } } None => Ok(None), diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 8d58135d..ed236cd6 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ 
-34,7 +34,7 @@ use crate::cmd::kubectl::{ use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::Stage::Infrastructure; use crate::events::{ EngineEvent, EnvironmentStep, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter, @@ -692,10 +692,7 @@ impl DOKS { let safe_message = "Load balancer IP wasn't able to be retrieved from UUID on DigitalOcean API and it's required for TLS setup"; return Err(EngineError::new_k8s_loadbalancer_configuration_issue( event_details.clone(), - CommandError::new( - format!("{}, error: {}.", safe_message, e.message(),), - Some(safe_message.to_string()), - ), + CommandError::new(e.message(ErrorMessageVerbosity::FullDetails), Some(safe_message.to_string())), )); } }; @@ -747,7 +744,10 @@ impl DOKS { .log(EngineEvent::Warning(event_details, EventMessage::new(ok_line, None))), Err(err) => self.logger().log(EngineEvent::Warning( event_details, - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + EventMessage::new( + "Error trying to get kubernetes events".to_string(), + Some(err.message(ErrorMessageVerbosity::FullDetails)), + ), )), }; @@ -906,7 +906,7 @@ impl DOKS { )), )), Err(e) => { - if !(e.message().contains("not found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!( @@ -926,7 +926,7 @@ impl DOKS { ); self.logger().log(EngineEvent::Warning( event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), + EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), 
)); } } @@ -1001,7 +1001,7 @@ impl DOKS { EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), )), Err(e) => { - if !(e.message().contains("not found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), @@ -1209,12 +1209,7 @@ impl Kubernetes for DOKS { } Some(content) => content, }, - Err(e) => { - return Err(EngineError::new_cannot_retrieve_cluster_config_file( - event_details, - CommandError::new(e.message(), Some(e.message())), - )) - } + Err(e) => return Err(EngineError::new_cannot_retrieve_cluster_config_file(event_details, e)), }; let workspace_directory = crate::fs::workspace_directory( @@ -1263,7 +1258,10 @@ impl Kubernetes for DOKS { match result { Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( event_details, - CommandError::new(e.message(), Some(e.message())), + CommandError::new( + e.message(ErrorMessageVerbosity::FullDetails), + Some(e.message(ErrorMessageVerbosity::SafeOnly)), + ), )), Ok((file_path, file)) => Ok((file_path, file)), } diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index fb3be15b..5da089c7 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -7,7 +7,7 @@ use crate::cmd::kubectl::{ kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output, }; use crate::cmd::structs::HelmHistoryRow; -use crate::errors::CommandError; +use crate::errors::{CommandError, ErrorMessageVerbosity}; use crate::utilities::calculate_hash; use semver::Version; use std::collections::HashMap; @@ -205,7 +205,7 @@ pub trait HelmChart: Send { let payload = match self.exec(kubernetes_config, envs, payload.clone()) { Ok(payload) => payload, Err(e) => { - error!("Error while deploying chart: {}", e.message()); + error!("Error while deploying chart: {}", 
e.message(ErrorMessageVerbosity::FullDetails)); self.on_deploy_failure(kubernetes_config, envs, payload)?; return Err(e); } @@ -502,7 +502,10 @@ impl HelmChart for CoreDNSConfigChart { Err(e) => return Err(e), }; if let Err(e) = self.exec(kubernetes_config, envs, None) { - error!("Error while deploying chart: {:?}", e.message()); + error!( + "Error while deploying chart: {:?}", + e.message(ErrorMessageVerbosity::FullDetails) + ); self.on_deploy_failure(kubernetes_config, envs, None)?; return Err(e); }; diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index e1e90163..d2585abd 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -27,7 +27,7 @@ use crate::cmd::kubectl::{ }; use crate::cmd::structs::KubernetesNodeCondition; use crate::dns_provider::DnsProvider; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EventDetails, EventMessage, GeneralStep, InfrastructureStep, Stage, Transmitter}; use crate::fs::workspace_directory; @@ -183,10 +183,10 @@ pub trait Kubernetes: Listen { Err(err) => { let error = EngineError::new_cannot_get_cluster_nodes( self.get_event_details(stage), - CommandError::new_from_safe_message(format!( - "Error while trying to get cluster nodes, error: {}", - err.message() - )), + CommandError::new( + err.message(ErrorMessageVerbosity::FullDetails), + Some("Error while trying to get cluster nodes.".to_string()), + ), ); self.logger().log(EngineEvent::Error(error.clone(), None)); @@ -267,7 +267,12 @@ pub trait Kubernetes: Listen { { let kubeconfig = match self.get_kubeconfig_file() { Ok((path, _)) => path, - Err(e) => return Err(CommandError::new(e.message(), None)), + Err(e) => { + return Err(CommandError::new( + e.message(ErrorMessageVerbosity::FullDetails), + Some(e.message(ErrorMessageVerbosity::SafeOnly)), + )) + } }; 
send_progress_on_long_task(self, Action::Create, || { @@ -786,12 +791,8 @@ where logger.log(EngineEvent::Warning( event_details.clone(), EventMessage::new( - format!( - "Encountering issues while trying to get objects kind {}: {:?}", - object, - e.message() - ), - None, + format!("Encountering issues while trying to get objects kind {}", object,), + Some(e.message(ErrorMessageVerbosity::FullDetails)), ), )); continue; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 4b3e8d78..fc8b6027 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -19,7 +19,7 @@ use crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_ use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::Stage::Infrastructure; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; use crate::io_models::{ @@ -995,7 +995,10 @@ impl Kapsule { .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(ok_line))), Err(err) => self.logger().log(EngineEvent::Warning( event_details, - EventMessage::new("Error trying to get kubernetes events".to_string(), Some(err.message())), + EventMessage::new( + "Error trying to get kubernetes events".to_string(), + Some(err.message(ErrorMessageVerbosity::FullDetails)), + ), )), }; @@ -1135,7 +1138,7 @@ impl Kapsule { Err(e) => { let safe_message = format!("Error while looking at the API metric value {}", metric_name); OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), 
CommandError::new(format!("{}, error: {}", safe_message, e.message()), Some(safe_message)))) + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(e.message(ErrorMessageVerbosity::FullDetails), Some(safe_message)))) } }; }); @@ -1306,7 +1309,7 @@ impl Kapsule { )), )), Err(e) => { - if !(e.message().contains("not found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!( @@ -1326,7 +1329,7 @@ impl Kapsule { ); self.logger().log(EngineEvent::Warning( event_details.clone(), - EventMessage::new(message_safe, Some(e.message())), + EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), )); } } @@ -1401,7 +1404,7 @@ impl Kapsule { EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), )), Err(e) => { - if !(e.message().contains("not found")) { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 76d4ebc9..2d6da1a1 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -18,7 +18,7 @@ use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::Statefulset; use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replicas_by_selector, ScalingKind}; use crate::cmd::structs::LabelsContent; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; use crate::io_models::ProgressLevel::Info; use crate::io_models::{ @@ -1056,7 +1056,10 @@ where Err(EngineError::new_k8s_service_issue( 
event_details, - CommandError::new(err.message(), Some("Error with Kubernetes service".to_string())), + CommandError::new( + err.message(ErrorMessageVerbosity::FullDetails), + Some("Error with Kubernetes service".to_string()), + ), )) } _ => { diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index 16d319c2..e857a916 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -8,7 +8,7 @@ use crate::cmd::command::QoveryCommand; use crate::cmd::helm::HelmCommand::{LIST, ROLLBACK, STATUS, UNINSTALL, UPGRADE}; use crate::cmd::helm::HelmError::{CannotRollback, CmdError, InvalidKubeConfig, ReleaseDoesNotExist}; use crate::cmd::structs::{HelmChart, HelmListItem}; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::EventDetails; use semver::Version; use serde_derive::Deserialize; @@ -137,7 +137,7 @@ impl Helm { ) { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { - stderr.push_str(&err.message()); + stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); let error = CommandError::new(stderr, err.message_safe()); Err(CmdError(chart.name.clone(), STATUS, error)) } @@ -174,7 +174,7 @@ impl Helm { let mut stderr = String::new(); match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { - stderr.push_str(&err.message()); + stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); let error = CommandError::new(stderr, err.message_safe()); Err(CmdError(chart.name.clone(), ROLLBACK, error)) } @@ -207,7 +207,7 @@ impl Helm { let mut stderr = String::new(); match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { - stderr.push_str(&err.message()); + stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); let error = CommandError::new(stderr, err.message_safe()); 
Err(CmdError(chart.name.clone(), UNINSTALL, error)) } @@ -483,7 +483,7 @@ impl Helm { // Try do define/specify a bit more the message let stderr_msg: String = error_message.into_iter().collect(); - let stderr_msg = format!("{}: {}", stderr_msg, err.message()); + let stderr_msg = format!("{}: {}", stderr_msg, err.message(ErrorMessageVerbosity::FullDetails)); let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { HelmError::ReleaseLocked(chart.name.clone()) } else if stderr_msg.contains("has been rolled back") { diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 369488f6..7318f75e 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -14,7 +14,7 @@ use crate::cmd::structs::{ }; use crate::constants::KUBECONFIG; use crate::error::{SimpleError, SimpleErrorKind}; -use crate::errors::CommandError; +use crate::errors::{CommandError, ErrorMessageVerbosity}; pub enum ScalingKind { Deployment, @@ -839,7 +839,7 @@ where match result { Ok(_) => Ok(()), Err(e) => { - let lower_case_message = e.message().to_lowercase(); + let lower_case_message = e.message(ErrorMessageVerbosity::FullDetails).to_lowercase(); if lower_case_message.contains("no resources found") || lower_case_message.ends_with(" deleted") { return Ok(()); } @@ -1158,7 +1158,7 @@ where &mut |_| {}, ) { Ok(_) => Ok(pod_to_be_deleted), - Err(e) => Err(CommandError::new(e.message(), None)), + Err(e) => Err(e), } } diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index 8662d99e..d20ef391 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -4,7 +4,7 @@ use retry::OperationResult; use crate::cmd::command::QoveryCommand; use crate::constants::TF_PLUGIN_CACHE_DIR; -use crate::errors::CommandError; +use crate::errors::{CommandError, ErrorMessageVerbosity}; use rand::Rng; use retry::Error::Operation; use std::{env, fs, thread, time}; @@ -14,8 +14,12 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re // in order to 
avoid lock errors on parallel run, let's sleep a bit // https://github.com/hashicorp/terraform/issues/28041 - if err.message().contains("Failed to install provider from shared cache") - || err.message().contains("Failed to install provider") + if err + .message(ErrorMessageVerbosity::FullDetails) + .contains("Failed to install provider from shared cache") + || err + .message(ErrorMessageVerbosity::FullDetails) + .contains("Failed to install provider") { let sleep_time_int = rand::thread_rng().gen_range(20..45); let sleep_time = time::Duration::from_secs(sleep_time_int); @@ -33,7 +37,10 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re )), )), }; - } else if err.message().contains("Plugin reinitialization required") { + } else if err + .message(ErrorMessageVerbosity::FullDetails) + .contains("Plugin reinitialization required") + { // terraform init is required return Ok(()); } diff --git a/src/errors/io.rs b/src/errors/io.rs index 42501a16..d743824a 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -6,14 +6,14 @@ use serde_derive::{Deserialize, Serialize}; #[serde(rename_all = "lowercase")] pub struct CommandError { message: String, - message_unsafe: String, + full_details: String, } impl From for CommandError { fn from(error: errors::CommandError) -> Self { CommandError { message: error.message_safe.unwrap_or_default(), - message_unsafe: error.message_raw, + full_details: error.full_details, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 22184db2..2831c945 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -16,19 +16,28 @@ use std::fmt::{Display, Formatter}; use thiserror::Error; use url::Url; +/// ErrorMessageVerbosity: represents command error message's verbosity from minimal to full verbosity. +pub enum ErrorMessageVerbosity { + SafeOnly, + FullDetailsWithoutEnvVars, + FullDetails, +} + /// CommandError: command error, mostly returned by third party tools. 
#[derive(Clone, Debug, Error, PartialEq)] pub struct CommandError { - /// message: full error message, can contains unsafe text such as passwords and tokens. - message_raw: String, + /// full_details: full error message, can contains unsafe text such as passwords and tokens. + full_details: String, /// message_safe: error message omitting displaying any protected data such as passwords and tokens. message_safe: Option, + /// env_vars: environments variables including touchy data such as secret keys. + env_vars: Option>, } impl CommandError { /// Returns CommandError message_raw. May contains unsafe text such as passwords and tokens. pub fn message_raw(&self) -> String { - self.message_raw.to_string() + self.full_details.to_string() } /// Returns CommandError message_safe omitting all unsafe text such as passwords and tokens. @@ -36,17 +45,41 @@ impl CommandError { self.message_safe.clone() } - /// Returns error all message (safe + unsafe). - pub fn message(&self) -> String { - // TODO(benjaminch): To be revamped, not sure how we should deal with safe and unsafe messages. - if let Some(msg) = &self.message_safe { - // TODO(benjaminch): Handle raw / safe as for event message - if self.message_raw != *msg { - return format!("{} {}", msg, self.message_raw); - } - } + /// Returns CommandError env_vars. + pub fn env_vars(&self) -> Option> { + self.env_vars.clone() + } - self.message_raw.to_string() + /// Returns error message based on verbosity. 
+ pub fn message(&self, message_verbosity: ErrorMessageVerbosity) -> String { + match message_verbosity { + ErrorMessageVerbosity::SafeOnly => match &self.message_safe { + None => "".to_string(), + Some(msg) => msg.to_string(), + }, + ErrorMessageVerbosity::FullDetailsWithoutEnvVars => match &self.message_safe { + None => self.full_details.to_string(), + Some(safe) => format!("{} / Full details: {}", safe, self.full_details), + }, + ErrorMessageVerbosity::FullDetails => match &self.message_safe { + None => self.full_details.to_string(), + Some(safe) => match &self.env_vars { + None => format!("{} / Full details: {}", safe, self.full_details), + Some(env_vars) => { + format!( + "{} / Full details: {} / Env vars: {}", + safe, + self.full_details, + env_vars + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect::>() + .join(" "), + ) + } + }, + }, + } } /// Creates a new CommandError from safe message. To be used when message is safe. @@ -57,8 +90,22 @@ impl CommandError { /// Creates a new CommandError having both a safe and an unsafe message. pub fn new(message_raw: String, message_safe: Option) -> Self { CommandError { - message_raw, + full_details: message_raw, message_safe, + env_vars: None, + } + } + + /// Creates a new CommandError having a safe, an unsafe message and env vars. 
+ pub fn new_with_env_vars( + message_raw: String, + message_safe: Option, + env_vars: Option>, + ) -> Self { + CommandError { + full_details: message_raw, + message_safe, + env_vars, } } @@ -68,8 +115,9 @@ impl CommandError { safe_message: Option, ) -> Self { CommandError { - message_raw: legacy_command_error.to_string(), + full_details: legacy_command_error.to_string(), message_safe: safe_message, + env_vars: None, } } @@ -82,16 +130,7 @@ impl CommandError { stdout: Option, stderr: Option, ) -> Self { - let mut unsafe_message = format!( - "{}\ncommand: {} {}\nenv: {}", - message, - bin, - cmd_args.join(" "), - envs.iter() - .map(|(k, v)| format!("{}={}", k, v)) - .collect::>() - .join(" ") - ); + let mut unsafe_message = format!("{}\ncommand: {} {}", message, bin, cmd_args.join(" "),); if let Some(txt) = stdout { unsafe_message = format!("{}\nSTDOUT {}", unsafe_message, txt); @@ -100,13 +139,13 @@ impl CommandError { unsafe_message = format!("{}\nSTDERR {}", unsafe_message, txt); } - CommandError::new(unsafe_message, Some(message)) + CommandError::new_with_env_vars(unsafe_message, Some(message), Some(envs)) } } impl Display for CommandError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(self.message().as_str()) + f.write_str(self.message(ErrorMessageVerbosity::SafeOnly).as_str()) // By default, expose safe message only } } @@ -356,9 +395,9 @@ impl EngineError { } /// Returns proper error message. 
- pub fn message(&self) -> String { + pub fn message(&self, message_verbosity: ErrorMessageVerbosity) -> String { match &self.message { - Some(msg) => msg.message(), + Some(msg) => msg.message(message_verbosity), None => self.qovery_log_message.to_string(), } } @@ -445,7 +484,9 @@ impl EngineError { EngineErrorCause::Internal, EngineErrorScope::from(self.event_details.transmitter()), self.event_details.execution_id().to_string(), - Some(self.message()), + // Note: Since legacy EngineError is read directly as is in the Core, not all details are exposed + // since it can lead to expose secrets, hence not exposing env vars which may contains secrets. + Some(self.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars)), ) } diff --git a/src/events/mod.rs b/src/events/mod.rs index deb2eb19..f1e472a9 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -7,7 +7,7 @@ pub mod io; extern crate url; use crate::cloud_provider::Kind; -use crate::errors::{CommandError, EngineError}; +use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::io_models::QoveryIdentifier; use std::fmt::{Display, Formatter}; @@ -41,7 +41,7 @@ impl EngineEvent { EngineEvent::Debug(_details, message) => message.message(message_verbosity), EngineEvent::Info(_details, message) => message.message(message_verbosity), EngineEvent::Warning(_details, message) => message.message(message_verbosity), - EngineEvent::Error(engine_error, _message) => engine_error.message(), + EngineEvent::Error(engine_error, _message) => engine_error.message(message_verbosity.into()), } } } @@ -49,9 +49,20 @@ impl EngineEvent { /// EventMessageVerbosity: represents event message's verbosity from minimal to full verbosity. 
pub enum EventMessageVerbosity { SafeOnly, + FullDetailsWithoutEnvVars, FullDetails, } +impl From for ErrorMessageVerbosity { + fn from(verbosity: EventMessageVerbosity) -> Self { + match verbosity { + EventMessageVerbosity::SafeOnly => ErrorMessageVerbosity::SafeOnly, + EventMessageVerbosity::FullDetailsWithoutEnvVars => ErrorMessageVerbosity::FullDetailsWithoutEnvVars, + EventMessageVerbosity::FullDetails => ErrorMessageVerbosity::FullDetails, + } + } +} + #[derive(Debug, Clone)] /// EventMessage: represents an event message. pub struct EventMessage { @@ -59,6 +70,8 @@ pub struct EventMessage { safe_message: String, // String containing full details including touchy data (passwords and tokens). full_details: Option, + // Environments variables including touchy data such as secret keys. + env_vars: Option>, } impl EventMessage { @@ -72,6 +85,26 @@ impl EventMessage { EventMessage { safe_message, full_details, + env_vars: None, + } + } + + /// Creates e new EventMessage with environment variables. + /// + /// Arguments + /// + /// * `safe_message`: Event safe message string (from which all unsafe text such as passwords and tokens has been removed). + /// * `full_details`: Event raw message string (which may include unsafe text such as passwords and tokens). + /// * `env_vars`: Event environment variables (which may contains unsafe text such as secrets keys). 
+ pub fn new_with_env_vars( + safe_message: String, + full_details: Option, + env_vars: Option>, + ) -> Self { + EventMessage { + safe_message, + full_details, + env_vars, } } @@ -84,6 +117,7 @@ impl EventMessage { EventMessage { safe_message, full_details: None, + env_vars: None, } } @@ -95,17 +129,35 @@ impl EventMessage { pub fn message(&self, message_verbosity: EventMessageVerbosity) -> String { match message_verbosity { EventMessageVerbosity::SafeOnly => self.safe_message.to_string(), - EventMessageVerbosity::FullDetails => match &self.full_details { + EventMessageVerbosity::FullDetailsWithoutEnvVars => match &self.full_details { None => self.safe_message.to_string(), Some(details) => format!("{} / Full details: {}", self.safe_message, details), }, + EventMessageVerbosity::FullDetails => match &self.full_details { + None => self.safe_message.to_string(), + Some(details) => match &self.env_vars { + None => format!("{} / Full details: {}", self.safe_message, details), + Some(env_vars) => { + format!( + "{} / Full details: {} / Env vars: {}", + self.safe_message, + details, + env_vars + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect::>() + .join(" "), + ) + } + }, + }, } } } impl From for EventMessage { fn from(e: CommandError) -> Self { - EventMessage::new(e.message_raw(), e.message_safe()) + EventMessage::new_with_env_vars(e.message_raw(), e.message_safe(), e.env_vars()) } } @@ -420,27 +472,61 @@ mod tests { #[test] fn test_event_message() { // setup: - let test_cases: Vec<(String, Option, EventMessageVerbosity, String)> = vec![ + let test_cases: Vec<( + String, + Option, + Option>, + EventMessageVerbosity, + String, + )> = vec![ ( "safe".to_string(), Some("raw".to_string()), + Some(vec![("env".to_string(), "value".to_string())]), EventMessageVerbosity::SafeOnly, "safe".to_string(), ), - ("safe".to_string(), None, EventMessageVerbosity::SafeOnly, "safe".to_string()), - ("safe".to_string(), None, EventMessageVerbosity::FullDetails, 
"safe".to_string()), + ( + "safe".to_string(), + None, + None, + EventMessageVerbosity::SafeOnly, + "safe".to_string(), + ), + ( + "safe".to_string(), + None, + None, + EventMessageVerbosity::FullDetails, + "safe".to_string(), + ), ( "safe".to_string(), Some("raw".to_string()), + None, EventMessageVerbosity::FullDetails, "safe / Full details: raw".to_string(), ), + ( + "safe".to_string(), + Some("raw".to_string()), + Some(vec![("env".to_string(), "value".to_string())]), + EventMessageVerbosity::FullDetailsWithoutEnvVars, + "safe / Full details: raw".to_string(), + ), + ( + "safe".to_string(), + Some("raw".to_string()), + Some(vec![("env".to_string(), "value".to_string())]), + EventMessageVerbosity::FullDetails, + "safe / Full details: raw / Env vars: env=value".to_string(), + ), ]; for tc in test_cases { // execute: - let (safe_message, raw_message, verbosity, expected) = tc; - let event_message = EventMessage::new(safe_message, raw_message); + let (safe_message, raw_message, env_vars, verbosity, expected) = tc; + let event_message = EventMessage::new_with_env_vars(safe_message, raw_message, env_vars); // validate: assert_eq!(expected, event_message.message(verbosity)); diff --git a/src/io_models.rs b/src/io_models.rs index 702348fd..4f27cf82 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -33,6 +33,7 @@ use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; +use crate::errors::ErrorMessageVerbosity; use crate::logger::Logger; use crate::models; use crate::models::application::{ApplicationError, ApplicationService}; @@ -829,7 +830,13 @@ impl Database { Some(db) } Err(e) => { - error!("{}", format!("error while parsing postgres version, error: {}", e.message())); + error!( + "{}", + format!( + "error while parsing postgres version, error: {}", + e.message(ErrorMessageVerbosity::FullDetails) + ) + ); None } }, @@ -854,7 +861,13 @@ impl Database 
{ Some(db) } Err(e) => { - error!("{}", format!("error while parsing mysql version, error: {}", e.message())); + error!( + "{}", + format!( + "error while parsing mysql version, error: {}", + e.message(ErrorMessageVerbosity::FullDetails) + ) + ); None } }, diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 3b553377..176df25c 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -539,7 +539,7 @@ where cluster_name.clone().as_str(), ) { Ok(kubeconfig) => kubeconfig, - Err(e) => return OperationResult::Retry(CommandError::new(e.message(), Some(e.message()))), + Err(e) => return OperationResult::Retry(e), }; match kubeconfig { From 5bb6e4a37dcae891ea34c4d5d25670f4213e0b9f Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 30 Mar 2022 20:44:19 +0200 Subject: [PATCH 009/122] fix: get cluster infos on aws UI (#672) --- .../bootstrap/charts/aws-ui-view/.helmignore | 22 +++++++++ .../bootstrap/charts/aws-ui-view/Chart.yaml | 5 ++ .../charts/aws-ui-view/templates/_helpers.tpl | 47 +++++++++++++++++++ .../aws-ui-view/templates/clusterrole.yaml | 35 ++++++++++++++ .../templates/clusterrolebinding.yaml | 12 +++++ .../bootstrap/charts/aws-ui-view/values.yaml | 3 ++ .../aws/kubernetes/helm_charts.rs | 10 ++++ 7 files changed, 134 insertions(+) create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/.helmignore create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore 
@@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml b/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml new file mode 100644 index 00000000..6385e4d6 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1 +description: A Helm chart for the AWS UI View +name: aws-ui-view +version: 1.0.0 diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl new file mode 100644 index 00000000..76e96336 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-ui-view.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-ui-view.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "aws-ui-view.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-ui-view.labels" -}} +app.kubernetes.io/name: {{ include "aws-ui-view.name" . }} +helm.sh/chart: {{ include "aws-ui-view.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml new file mode 100644 index 00000000..dff89b24 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: {{ include "aws-ui-view.fullname" . }} +rules: + - apiGroups: + - '*' + resources: + - nodes + - namespaces + - pods + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + - replicasets + verbs: + - get + - list + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list \ No newline at end of file diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..16802963 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "aws-ui-view.fullname" . }} +subjects: + - kind: Group + name: Admins + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ include "aws-ui-view.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/lib/aws/bootstrap/charts/aws-ui-view/values.yaml b/lib/aws/bootstrap/charts/aws-ui-view/values.yaml new file mode 100644 index 00000000..299bcc74 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/values.yaml @@ -0,0 +1,3 @@ +nameOverride: aws-ui-view + +fullnameOverride: "aws-ui-view" \ No newline at end of file diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 081a9c7d..ac921c17 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -253,6 +253,15 @@ pub fn aws_helm_charts( }, }; + let aws_ui_view = CommonChart { + chart_info: ChartInfo { + name: "aws-ui-view".to_string(), + path: chart_path("charts/aws-ui-view"), + namespace: HelmChartNamespaces::KubeSystem, + ..Default::default() + }, + }; + let cluster_autoscaler = CommonChart { chart_info: ChartInfo { name: "cluster-autoscaler".to_string(), @@ -1157,6 +1166,7 @@ datasources: Box::new(q_storage_class), Box::new(coredns_config), Box::new(aws_vpc_cni_chart), + Box::new(aws_ui_view), ]; let level_2: Vec> = vec![Box::new(cert_manager)]; From 2a6760ff87b813648b36e536755ee49076c3b1cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 30 Mar 2022 23:01:34 +0200 Subject: [PATCH 010/122] Refacto databases (#676) Refacto databases --- src/cloud_provider/aws/databases/mod.rs | 5 - src/cloud_provider/aws/databases/mongodb.rs | 451 ------------ src/cloud_provider/aws/databases/mysql.rs | 473 ------------- .../aws/databases/postgresql.rs | 468 ------------- src/cloud_provider/aws/databases/redis.rs | 461 ------------ src/cloud_provider/aws/databases/utilities.rs | 58 -- src/cloud_provider/aws/mod.rs | 1 - .../digitalocean/databases/mod.rs | 4 - .../digitalocean/databases/mongodb.rs | 384 ---------- .../digitalocean/databases/mysql.rs | 388 ---------- 
.../digitalocean/databases/postgresql.rs | 390 ----------- .../digitalocean/databases/redis.rs | 385 ---------- .../digitalocean/kubernetes/doks_api.rs | 2 +- .../digitalocean/kubernetes/mod.rs | 3 +- src/cloud_provider/digitalocean/mod.rs | 1 - src/cloud_provider/kubernetes.rs | 6 +- src/cloud_provider/scaleway/databases/mod.rs | 4 - .../scaleway/databases/mongodb.rs | 385 ---------- .../scaleway/databases/mysql.rs | 417 ----------- .../scaleway/databases/postgresql.rs | 426 ----------- .../scaleway/databases/redis.rs | 382 ---------- src/cloud_provider/scaleway/mod.rs | 1 - src/cloud_provider/service.rs | 31 +- src/cloud_provider/utilities.rs | 303 +------- src/errors/mod.rs | 2 +- src/io_models.rs | 661 +++++++++++------- src/models/application.rs | 36 +- src/models/aws/database.rs | 326 +++++++++ src/models/aws/database_utils.rs | 198 ++++++ src/models/aws/mod.rs | 8 +- src/models/database.rs | 508 ++++++++++++++ src/models/database_utils.rs | 199 ++++++ src/models/digital_ocean/database.rs | 152 ++++ src/models/digital_ocean/mod.rs | 3 +- src/models/mod.rs | 2 + src/models/router.rs | 4 +- src/models/scaleway/database.rs | 293 ++++++++ src/models/scaleway/database_utils.rs | 36 + src/models/scaleway/mod.rs | 4 +- src/models/types.rs | 106 ++- tests/aws/aws_databases.rs | 2 +- tests/scaleway/scw_databases.rs | 2 +- 42 files changed, 2275 insertions(+), 5696 deletions(-) delete mode 100644 src/cloud_provider/aws/databases/mod.rs delete mode 100644 src/cloud_provider/aws/databases/mongodb.rs delete mode 100644 src/cloud_provider/aws/databases/mysql.rs delete mode 100644 src/cloud_provider/aws/databases/postgresql.rs delete mode 100644 src/cloud_provider/aws/databases/redis.rs delete mode 100644 src/cloud_provider/aws/databases/utilities.rs delete mode 100644 src/cloud_provider/digitalocean/databases/mod.rs delete mode 100644 src/cloud_provider/digitalocean/databases/mongodb.rs delete mode 100644 src/cloud_provider/digitalocean/databases/mysql.rs delete mode 
100644 src/cloud_provider/digitalocean/databases/postgresql.rs delete mode 100644 src/cloud_provider/digitalocean/databases/redis.rs delete mode 100644 src/cloud_provider/scaleway/databases/mod.rs delete mode 100644 src/cloud_provider/scaleway/databases/mongodb.rs delete mode 100644 src/cloud_provider/scaleway/databases/mysql.rs delete mode 100644 src/cloud_provider/scaleway/databases/postgresql.rs delete mode 100644 src/cloud_provider/scaleway/databases/redis.rs create mode 100644 src/models/aws/database.rs create mode 100644 src/models/aws/database_utils.rs create mode 100644 src/models/database.rs create mode 100644 src/models/database_utils.rs create mode 100644 src/models/digital_ocean/database.rs create mode 100644 src/models/scaleway/database.rs create mode 100644 src/models/scaleway/database_utils.rs diff --git a/src/cloud_provider/aws/databases/mod.rs b/src/cloud_provider/aws/databases/mod.rs deleted file mode 100644 index 53cb27d3..00000000 --- a/src/cloud_provider/aws/databases/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod mongodb; -pub mod mysql; -pub mod postgresql; -pub mod redis; -pub mod utilities; diff --git a/src/cloud_provider/aws/databases/mongodb.rs b/src/cloud_provider/aws/databases/mongodb.rs deleted file mode 100644 index 9f8af5fc..00000000 --- a/src/cloud_provider/aws/databases/mongodb.rs +++ /dev/null @@ -1,451 +0,0 @@ -use std::collections::HashMap; - -use crate::cloud_provider::aws::databases::utilities::aws_final_snapshot_name; -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{ - generate_supported_version, 
get_self_hosted_mongodb_version, get_supported_version_to_use, print_action, -}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct MongoDbAws { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MongoDbAws { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - MongoDbAws { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - get_mongodb_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "mongodb" - } -} - -impl StatefulService for MongoDbAws { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl Service for MongoDbAws { - fn 
context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MongoDB(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - // https://docs.aws.amazon.com/documentdb/latest/developerguide/limits.html#limits-naming_constraints - let prefix = "mongodb"; - let max_size = 60 - prefix.len(); // 63 (max DocumentDB) - 3 (k8s statefulset chars) - let mut new_name = format!("{}{}", prefix, self.name().replace('_', "").replace('-', "")); - if new_name.chars().count() > max_size { - new_name = new_name[..max_size].to_string(); - } - - new_name - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, target.kubernetes, target.environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - 
kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = self - .matching_correct_version(self.is_managed_service(), event_details)? - .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_db_name", self.name.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("encrypt_disk", &self.options.encrypt_disk); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - - context.insert("publicly_accessible", &self.options.publicly_accessible); - context.insert("skip_final_snapshot", &false); - context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - if 
self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MongoDbAws {} - -impl ToTransmitter for MongoDbAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Helm for MongoDbAws { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mongodb-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mongodb", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/aws/chart_values/mongodb", self.context.lib_root_dir()) // FIXME replace `chart_values` by `charts_values` - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MongoDbAws { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/aws/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/aws/services/mongodb", self.context.lib_root_dir()) - } -} - -impl Create for MongoDbAws { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), 
&*self.logger) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Pause for MongoDbAws { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for MongoDbAws { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - 
delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for MongoDbAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -fn get_mongodb_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - get_managed_mongodb_version(requested_version) - } else { - get_self_hosted_mongodb_version(requested_version) - } -} - -fn get_managed_mongodb_version(requested_version: String) -> Result { - let mut supported_mongodb_versions = HashMap::new(); - - // v3.6.0 - let mongo_version = generate_supported_version(3, 6, 6, Some(0), Some(0), None); - supported_mongodb_versions.extend(mongo_version); - - // v4.0.0 - let mongo_version = generate_supported_version(4, 0, 0, Some(0), Some(0), None); - supported_mongodb_versions.extend(mongo_version); - - get_supported_version_to_use("DocumentDB", supported_mongodb_versions, requested_version) -} - -#[cfg(test)] -mod tests_mongodb { - use crate::cloud_provider::aws::databases::mongodb::get_mongodb_version; - use crate::errors::ErrorMessageVerbosity; - - #[test] - fn check_mongodb_version() { - // managed version - assert_eq!(get_mongodb_version("4".to_string(), true).unwrap(), "4.0.0"); - assert_eq!(get_mongodb_version("4.0".to_string(), true).unwrap(), "4.0.0"); - assert!(get_mongodb_version("4.4".to_string(), true) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("DocumentDB 4.4 version is not supported")); - 
// self-hosted version - assert_eq!(get_mongodb_version("4".to_string(), false).unwrap(), "4.4.4"); - assert_eq!(get_mongodb_version("4.2".to_string(), false).unwrap(), "4.2.12"); - assert!(get_mongodb_version("3.4".to_string(), false) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("MongoDB 3.4 version is not supported")); - } -} diff --git a/src/cloud_provider/aws/databases/mysql.rs b/src/cloud_provider/aws/databases/mysql.rs deleted file mode 100644 index 81913fde..00000000 --- a/src/cloud_provider/aws/databases/mysql.rs +++ /dev/null @@ -1,473 +0,0 @@ -use std::collections::HashMap; - -use tera::Context as TeraContext; - -use crate::cloud_provider::aws::databases::utilities::{aws_final_snapshot_name, get_parameter_group_from_version}; -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{ - generate_supported_version, get_self_hosted_mysql_version, get_supported_version_to_use, managed_db_name_sanitizer, - print_action, -}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, DatabaseKind, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct MySQLAws { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - 
options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MySQLAws { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - get_mysql_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "mysql" - } -} - -impl StatefulService for MySQLAws { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for MySQLAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for MySQLAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MySQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints - let prefix = "mysql"; - let max_size = 63 - 3; // max RDS - k8s statefulset chars - managed_db_name_sanitizer(max_size, prefix, 
self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = &self.matching_correct_version(self.is_managed_service(), event_details.clone())?; - context.insert("version", &version.matched_version().to_string()); - - if self.is_managed_service() { - let parameter_group_family = - match get_parameter_group_from_version(version.matched_version(), DatabaseKind::Mysql) { - Ok(v) => v, - Err(e) => { - return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details, - "MySQL".to_string(), - "parameter_group_family".to_string(), - version.matched_version().to_string(), - Some(e), - )) - } - }; - 
context.insert("parameter_group_family", ¶meter_group_family); - }; - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("encrypt_disk", &self.options.encrypt_disk); - context.insert("database_name", &self.sanitized_name()); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - - context.insert("skip_final_snapshot", &false); - context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - context.insert("publicly_accessible", &self.options.publicly_accessible); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - 
Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MySQLAws {} - -impl Helm for MySQLAws { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mysql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mysql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/aws/chart_values/mysql", self.context.lib_root_dir()) // FIXME replace `chart_values` by `charts_values` - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MySQLAws { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/aws/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/aws/services/mysql", self.context.lib_root_dir()) - } -} - -impl Create for MySQLAws { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - 
self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for MySQLAws { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Delete for MySQLAws { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} 
- -impl Listen for MySQLAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -fn get_mysql_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - get_managed_mysql_version(requested_version) - } else { - get_self_hosted_mysql_version(requested_version) - } -} - -fn get_managed_mysql_version(requested_version: String) -> Result { - let mut supported_mysql_versions = HashMap::new(); - // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt - - // v5.7 - let mut v57 = generate_supported_version(5, 7, 7, Some(16), Some(34), None); - v57.remove("5.7.32"); - v57.remove("5.7.29"); - v57.remove("5.7.27"); - v57.remove("5.7.20"); - v57.remove("5.7.18"); - supported_mysql_versions.extend(v57); - - // v8 - let mut v8 = generate_supported_version(8, 0, 0, Some(11), Some(26), None); - v8.remove("8.0.24"); - v8.remove("8.0.22"); - v8.remove("8.0.18"); - v8.remove("8.0.14"); - v8.remove("8.0.12"); - supported_mysql_versions.extend(v8); - - get_supported_version_to_use("RDS MySQL", supported_mysql_versions, requested_version) -} - -#[cfg(test)] -mod tests_mysql { - use crate::cloud_provider::aws::databases::mysql::get_mysql_version; - use crate::errors::ErrorMessageVerbosity; - - #[test] - fn check_mysql_version() { - // managed version - assert_eq!(get_mysql_version("8".to_string(), true).unwrap(), "8.0.26"); - assert_eq!(get_mysql_version("8.0".to_string(), true).unwrap(), "8.0.26"); - assert_eq!(get_mysql_version("8.0.16".to_string(), true).unwrap(), "8.0.16"); - assert!(get_mysql_version("8.0.18".to_string(), true) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("RDS MySQL 8.0.18 version is not supported")); - // self-hosted version - assert_eq!(get_mysql_version("5".to_string(), false).unwrap(), "5.7.34"); - 
assert_eq!(get_mysql_version("5.7".to_string(), false).unwrap(), "5.7.34"); - assert_eq!(get_mysql_version("5.7.31".to_string(), false).unwrap(), "5.7.31"); - assert!(get_mysql_version("1.0".to_string(), false) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("MySQL 1.0 version is not supported")); - } -} diff --git a/src/cloud_provider/aws/databases/postgresql.rs b/src/cloud_provider/aws/databases/postgresql.rs deleted file mode 100644 index e3f076f0..00000000 --- a/src/cloud_provider/aws/databases/postgresql.rs +++ /dev/null @@ -1,468 +0,0 @@ -use std::collections::HashMap; - -use crate::cloud_provider::aws::databases::utilities::aws_final_snapshot_name; -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{ - generate_supported_version, get_self_hosted_postgres_version, get_supported_version_to_use, - managed_db_name_sanitizer, print_action, -}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct PostgreSQLAws { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: 
Box, -} - -impl PostgreSQLAws { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - PostgreSQLAws { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - get_postgres_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "postgresql" - } -} - -impl StatefulService for PostgreSQLAws { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for PostgreSQLAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for PostgreSQLAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::PostgreSQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints - let prefix = "postgresql"; - let max_size = 63 - 3; // max RDS - k8s statefulset chars - managed_db_name_sanitizer(max_size, prefix, self.name()) - } - - fn 
version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = self - .matching_correct_version(self.is_managed_service(), event_details)? 
- .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_name", self.sanitized_name().as_str()); - context.insert("database_db_name", self.name()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("encrypt_disk", &self.options.encrypt_disk); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - - context.insert("skip_final_snapshot", &false); - context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - - context.insert("publicly_accessible", &self.options.publicly_accessible); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - 
&*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for PostgreSQLAws {} - -impl Helm for PostgreSQLAws { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("postgresql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/postgresql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/aws/chart_values/postgresql", self.context.lib_root_dir()) // FIXME replace `chart_values` by `charts_values` - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for PostgreSQLAws { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/aws/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/aws/services/postgresql", self.context.lib_root_dir()) - } -} - -impl Create for PostgreSQLAws { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for PostgreSQLAws { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for PostgreSQLAws { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - 
self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Listen for PostgreSQLAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -fn get_postgres_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - get_managed_postgres_version(requested_version) - } else { - get_self_hosted_postgres_version(requested_version) - } -} - -fn get_managed_postgres_version(requested_version: String) -> Result { - let mut supported_postgres_versions = HashMap::new(); - - // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts - - // v10 - let mut v10 = generate_supported_version(10, 1, 18, None, None, None); - v10.remove("10.2"); // non supported version by AWS - v10.remove("10.8"); // non supported version by AWS - supported_postgres_versions.extend(v10); - - // v11 - let mut v11 = generate_supported_version(11, 1, 13, None, None, None); - v11.remove("11.3"); // non supported version by AWS - supported_postgres_versions.extend(v11); - - // v12 - let v12 = generate_supported_version(12, 2, 8, None, None, None); - supported_postgres_versions.extend(v12); - - // v13 - let v13 = generate_supported_version(13, 1, 4, None, None, None); - supported_postgres_versions.extend(v13); - - get_supported_version_to_use("Postgresql", supported_postgres_versions, requested_version) -} - -#[cfg(test)] -mod tests_postgres { - use crate::cloud_provider::aws::databases::postgresql::get_postgres_version; - use crate::errors::ErrorMessageVerbosity; - - #[test] - fn check_postgres_version() { - // managed version - assert_eq!(get_postgres_version("12".to_string(), true).unwrap(), "12.8"); - assert_eq!(get_postgres_version("12.3".to_string(), true).unwrap(), "12.3"); - assert!(get_postgres_version("12.3.0".to_string(), 
true) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Postgresql 12.3.0 version is not supported")); - assert!(get_postgres_version("11.3".to_string(), true) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Postgresql 11.3 version is not supported")); - // self-hosted version - assert_eq!(get_postgres_version("12".to_string(), false).unwrap(), "12.8.0"); - assert_eq!(get_postgres_version("12.8".to_string(), false).unwrap(), "12.8.0"); - assert_eq!(get_postgres_version("12.3.0".to_string(), false).unwrap(), "12.3.0"); - assert!(get_postgres_version("1.0".to_string(), false) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Postgresql 1.0 version is not supported")); - } -} diff --git a/src/cloud_provider/aws/databases/redis.rs b/src/cloud_provider/aws/databases/redis.rs deleted file mode 100644 index df420ae3..00000000 --- a/src/cloud_provider/aws/databases/redis.rs +++ /dev/null @@ -1,461 +0,0 @@ -use std::collections::HashMap; - -use crate::cloud_provider::aws::databases::utilities::aws_final_snapshot_name; -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_redis_version, get_supported_version_to_use, print_action}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use 
crate::logger::Logger; -use ::function_name::named; - -pub struct RedisAws { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl RedisAws { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - get_redis_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "redis" - } -} - -impl StatefulService for RedisAws { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for RedisAws { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for RedisAws { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::Redis(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } 
- - fn sanitized_name(&self) -> String { - // https://aws.amazon.com/about-aws/whats-new/2019/08/elasticache_supports_50_chars_cluster_name - let prefix = "redis"; - let max_size = 47 - prefix.len(); // 50 (max Elasticache ) - 3 (k8s statefulset chars) - let mut new_name = self.name().replace('_', "").replace('-', ""); - - if new_name.chars().count() > max_size { - new_name = new_name[..max_size].to_string(); - } - - format!("{}{}", prefix, new_name) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - let version = self - .matching_correct_version(self.is_managed_service(), event_details.clone())? 
- .matched_version() - .to_string(); - - let parameter_group_name = if version.starts_with("5.") { - "default.redis5.0" - } else if version.starts_with("6.") { - "default.redis6.x" - } else { - return Err(EngineError::new_terraform_unsupported_context_parameter_value( - event_details, - "Elasicache".to_string(), - "database_elasticache_parameter_group_name".to_string(), - format!("default.redis{}", version), - None, - )); - }; - - context.insert("database_elasticache_parameter_group_name", parameter_group_name); - - context.insert("namespace", environment.namespace()); - context.insert("version", version.as_str()); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - context.insert("skip_final_snapshot", &false); - 
context.insert("final_snapshot_name", &aws_final_snapshot_name(self.id())); - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for RedisAws {} - -impl Helm for RedisAws { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("redis-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/redis", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/aws/chart_values/redis", self.context.lib_root_dir()) // FIXME replace `chart_values` by `charts_values` - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for RedisAws { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/aws/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/aws/services/redis", self.context.lib_root_dir()) - } -} - -impl Create for RedisAws { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn 
on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Pause for RedisAws { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for RedisAws { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, 
event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for RedisAws { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -fn get_redis_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - get_managed_redis_version(requested_version) - } else { - get_self_hosted_redis_version(requested_version) - } -} - -fn get_managed_redis_version(requested_version: String) -> Result { - let mut supported_redis_versions = HashMap::with_capacity(2); - // https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/supported-engine-versions.html - - supported_redis_versions.insert("6".to_string(), "6.x".to_string()); - supported_redis_versions.insert("5".to_string(), "5.0.6".to_string()); - - get_supported_version_to_use("Elasticache", supported_redis_versions, requested_version) -} - -#[cfg(test)] -mod tests { - use crate::cloud_provider::aws::databases::redis::get_redis_version; - use crate::errors::ErrorMessageVerbosity; - - #[test] - fn check_redis_version() { - // managed version - assert_eq!(get_redis_version("6".to_string(), true).unwrap(), "6.x"); - assert_eq!(get_redis_version("5".to_string(), true).unwrap(), "5.0.6"); - assert!(get_redis_version("1.0".to_string(), true) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Elasticache 1.0 version is not supported")); - - // self-hosted version - assert_eq!(get_redis_version("6".to_string(), false).unwrap(), "6.0.9"); - 
assert_eq!(get_redis_version("6.0".to_string(), false).unwrap(), "6.0.9"); - assert!(get_redis_version("1.0".to_string(), false) - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Redis 1.0 version is not supported")); - } -} diff --git a/src/cloud_provider/aws/databases/utilities.rs b/src/cloud_provider/aws/databases/utilities.rs deleted file mode 100644 index 99d8e197..00000000 --- a/src/cloud_provider/aws/databases/utilities.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::cloud_provider::utilities::VersionsNumber; -use crate::errors::CommandError; -use crate::io_models::DatabaseKind; - -pub fn get_parameter_group_from_version( - version: VersionsNumber, - database_kind: DatabaseKind, -) -> Result { - if version.minor.is_none() { - return Err(CommandError::new_from_safe_message(format!( - "Can't determine the minor version, to select parameter group for {:?} version {}", - database_kind, version - ))); - }; - - match database_kind { - DatabaseKind::Mysql => Ok(format!("mysql{}.{}", version.major, version.minor.unwrap())), - _ => Ok("".to_string()), - } -} - -// name of the last snapshot before the database get deleted -pub fn aws_final_snapshot_name(database_name: &str) -> String { - format!("qovery-{}-final-snap", database_name) -} - -#[cfg(test)] -mod tests_aws_databases_parameters { - use crate::cloud_provider::aws::databases::utilities::get_parameter_group_from_version; - use crate::cloud_provider::utilities::VersionsNumber; - use crate::errors::ErrorMessageVerbosity; - use crate::io_models::DatabaseKind; - use std::str::FromStr; - - #[test] - fn check_rds_mysql_parameter_groups() { - let mysql_parameter_group = get_parameter_group_from_version( - VersionsNumber::from_str("5.7.0").expect("error while trying to get version from str"), - DatabaseKind::Mysql, - ); - assert_eq!(mysql_parameter_group.unwrap(), "mysql5.7"); - - let mysql_parameter_group = get_parameter_group_from_version( - VersionsNumber::from_str("8.0").expect("error while 
trying to get version from str"), - DatabaseKind::Mysql, - ); - assert_eq!(mysql_parameter_group.unwrap(), "mysql8.0"); - - let mysql_parameter_group = get_parameter_group_from_version( - VersionsNumber::from_str("8").expect("error while trying to get version from str"), - DatabaseKind::Mysql, - ); - assert!(mysql_parameter_group - .unwrap_err() - .message(ErrorMessageVerbosity::FullDetails) - .contains("Can't determine the minor version, to select parameter group for Mysql version 8")); - } -} diff --git a/src/cloud_provider/aws/mod.rs b/src/cloud_provider/aws/mod.rs index fd93b941..f5a30786 100644 --- a/src/cloud_provider/aws/mod.rs +++ b/src/cloud_provider/aws/mod.rs @@ -12,7 +12,6 @@ use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; use crate::runtime::block_on; -pub mod databases; pub mod kubernetes; pub mod regions; diff --git a/src/cloud_provider/digitalocean/databases/mod.rs b/src/cloud_provider/digitalocean/databases/mod.rs deleted file mode 100644 index 83079909..00000000 --- a/src/cloud_provider/digitalocean/databases/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod mongodb; -pub mod mysql; -pub mod postgresql; -pub mod redis; diff --git a/src/cloud_provider/digitalocean/databases/mongodb.rs b/src/cloud_provider/digitalocean/databases/mongodb.rs deleted file mode 100644 index 1c179c2f..00000000 --- a/src/cloud_provider/digitalocean/databases/mongodb.rs +++ /dev/null @@ -1,384 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use 
crate::cloud_provider::utilities::{get_self_hosted_mongodb_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct MongoDo { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MongoDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - MongoDo { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mongodb_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "mongodb" - } -} - -impl StatefulService for MongoDo { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for MongoDo { - fn to_transmitter(&self) -> Transmitter { - 
Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for MongoDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MongoDB(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("mongodb", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = self - .matching_correct_version(event_details)? 
- .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_db_name", self.name.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MongoDo {} - -impl Helm for MongoDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mongodb-{}", self.id()), 50) - 
} - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mongodb", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/digitalocean/chart_values/mongodb", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MongoDo { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/mongodb", self.context.lib_root_dir()) - } -} - -impl Create for MongoDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for MongoDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), 
- ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for MongoDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Listen for MongoDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/digitalocean/databases/mysql.rs b/src/cloud_provider/digitalocean/databases/mysql.rs deleted file mode 100644 index 5bffb434..00000000 --- a/src/cloud_provider/digitalocean/databases/mysql.rs +++ /dev/null @@ -1,388 +0,0 @@ -use tera::Context as TeraContext; - -use 
crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_mysql_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct MySQLDo { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MySQLDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mysql_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn 
cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "mysql" - } -} - -impl StatefulService for MySQLDo { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for MySQLDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for MySQLDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MySQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("mysql", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", 
&kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = &self - .matching_correct_version(event_details)? - .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn 
logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MySQLDo {} - -impl Helm for MySQLDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mysql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mysql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/digitalocean/chart_values/mysql", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MySQLDo { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/mysql", self.context.lib_root_dir()) - } -} - -impl Create for MySQLDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Create, - Box::new(|| deploy_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - //FIXME : perform an actual check - Ok(()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - 
self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for MySQLDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for MySQLDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Delete, - Box::new(|| delete_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for MySQLDo { - fn listeners(&self) -> 
&Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/digitalocean/databases/postgresql.rs b/src/cloud_provider/digitalocean/databases/postgresql.rs deleted file mode 100644 index 2b47a106..00000000 --- a/src/cloud_provider/digitalocean/databases/postgresql.rs +++ /dev/null @@ -1,390 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_postgres_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct PostgresDo { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl PostgresDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - PostgresDo { - context, - action, - id: id.to_string(), - name: 
name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_postgres_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "postgresql" - } -} - -impl StatefulService for PostgresDo { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for PostgresDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for PostgresDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::PostgreSQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("postgresql", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn 
tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = self - .matching_correct_version(event_details)? - .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_db_name", self.name()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", 
&self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } -} - -impl Database for PostgresDo {} - -impl Helm for PostgresDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("postgresql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/postgresql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/digitalocean/chart_values/postgresql", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for PostgresDo { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/postgresql", self.context.lib_root_dir()) - } -} - -impl Create for PostgresDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), 
- ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Create, - Box::new(|| deploy_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for PostgresDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for PostgresDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Delete, - Box::new(|| 
delete_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Listen for PostgresDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/digitalocean/databases/redis.rs b/src/cloud_provider/digitalocean/databases/redis.rs deleted file mode 100644 index a06684d9..00000000 --- a/src/cloud_provider/digitalocean/databases/redis.rs +++ /dev/null @@ -1,385 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_redis_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct RedisDo { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - 
fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl RedisDo { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_redis_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "digitalocean" - } - - fn struct_name(&self) -> &str { - "redis" - } -} - -impl StatefulService for RedisDo { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for RedisDo { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for RedisDo { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::Redis(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("redis", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) 
-> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - let version = self - .matching_correct_version(event_details)? 
- .matched_version() - .to_string(); - - context.insert("namespace", environment.namespace()); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for RedisDo {} - -impl Helm for RedisDo { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("redis-{}", self.id()), 50) - } 
- - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/redis", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/digitalocean/chart_values/redis", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for RedisDo { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/digitalocean/services/redis", self.context.lib_root_dir()) - } -} - -impl Create for RedisDo { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Create, - Box::new(|| deploy_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - //FIXME : perform an actual check - Ok(()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Pause for RedisDo { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - 
self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Delete for RedisDo { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task( - self, - crate::cloud_provider::service::Action::Pause, - Box::new(|| delete_stateful_service(target, self, event_details.clone(), self.logger())), - ) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for RedisDo { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index a1c2a033..67f3afad 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ 
b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -1,8 +1,8 @@ use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType}; use crate::cloud_provider::digitalocean::models::doks::KubernetesCluster; use crate::cloud_provider::digitalocean::models::doks::{DoksList, DoksOptions, KubernetesVersion}; -use crate::cloud_provider::utilities::VersionsNumber; use crate::errors::CommandError; +use crate::models::types::VersionsNumber; use std::str::FromStr; pub fn get_doks_info_from_name( diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index ed236cd6..58fc9586 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -25,7 +25,7 @@ use crate::cloud_provider::kubernetes::{ }; use crate::cloud_provider::models::NodeGroups; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::utilities::{print_action, VersionsNumber}; +use crate::cloud_provider::utilities::print_action; use crate::cloud_provider::{kubernetes, CloudProvider}; use crate::cmd::helm::{to_engine_error, Helm}; use crate::cmd::kubectl::{ @@ -45,6 +45,7 @@ use crate::io_models::{ }; use crate::logger::Logger; use crate::models::digital_ocean::DoRegion; +use crate::models::types::VersionsNumber; use crate::object_storage::spaces::{BucketDeleteStrategy, Spaces}; use crate::object_storage::ObjectStorage; use crate::runtime::block_on; diff --git a/src/cloud_provider/digitalocean/mod.rs b/src/cloud_provider/digitalocean/mod.rs index 5aa63ab8..79cb4c8e 100644 --- a/src/cloud_provider/digitalocean/mod.rs +++ b/src/cloud_provider/digitalocean/mod.rs @@ -11,7 +11,6 @@ use crate::errors::EngineError; use crate::events::{EventDetails, GeneralStep, Stage, ToTransmitter, Transmitter}; use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod databases; pub mod do_api_common; pub mod kubernetes; pub mod models; 
diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index d2585abd..cd89c9a1 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -18,7 +18,6 @@ use crate::cloud_provider::aws::regions::AwsZones; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::models::{CpuLimits, NodeGroups}; use crate::cloud_provider::service::CheckAction; -use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::{service, CloudProvider, DeploymentTarget}; use crate::cmd::kubectl; use crate::cmd::kubectl::{ @@ -36,6 +35,7 @@ use crate::io_models::{ Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath, }; use crate::logger::Logger; +use crate::models::types::VersionsNumber; use crate::object_storage::ObjectStorage; use crate::unit_conversion::{any_to_mi, cpu_string_to_float}; @@ -1425,18 +1425,18 @@ pub fn convert_k8s_cpu_value_to_f32(value: String) -> Result #[cfg(test)] mod tests { use crate::cloud_provider::Kind::Aws; - use std::str::FromStr; use crate::cloud_provider::kubernetes::{ check_kubernetes_upgrade_status, compare_kubernetes_cluster_versions_for_upgrade, convert_k8s_cpu_value_to_f32, validate_k8s_required_cpu_and_burstable, KubernetesNodesType, }; use crate::cloud_provider::models::CpuLimits; - use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd::structs::{KubernetesList, KubernetesNode, KubernetesVersion}; use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter}; use crate::io_models::{ListenersHelper, QoveryIdentifier}; use crate::logger::StdIoLogger; + use crate::models::types::VersionsNumber; + use std::str::FromStr; #[test] pub fn check_kubernetes_upgrade_method() { diff --git a/src/cloud_provider/scaleway/databases/mod.rs b/src/cloud_provider/scaleway/databases/mod.rs deleted file mode 100644 index 83079909..00000000 --- a/src/cloud_provider/scaleway/databases/mod.rs 
+++ /dev/null @@ -1,4 +0,0 @@ -pub mod mongodb; -pub mod mysql; -pub mod postgresql; -pub mod redis; diff --git a/src/cloud_provider/scaleway/databases/mongodb.rs b/src/cloud_provider/scaleway/databases/mongodb.rs deleted file mode 100644 index f1b39561..00000000 --- a/src/cloud_provider/scaleway/databases/mongodb.rs +++ /dev/null @@ -1,385 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_mongodb_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct MongoDbScw { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MongoDbScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - MongoDbScw { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: 
fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_mongodb_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "mongodb" - } -} - -impl StatefulService for MongoDbScw { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for MongoDbScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for MongoDbScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MongoDB(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("mongodb", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = self - .matching_correct_version(event_details)? - .matched_version() - .to_string(); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_db_name", self.name.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); 
- context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MongoDbScw {} - -impl Helm for MongoDbScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mongodb-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mongodb", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/scaleway/chart_values/mongodb", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MongoDbScw { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/mongodb", self.context.lib_root_dir()) - } -} - -impl Create for MongoDbScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } 
- - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Pause for MongoDbScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for MongoDbScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, 
event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for MongoDbScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/scaleway/databases/mysql.rs b/src/cloud_provider/scaleway/databases/mysql.rs deleted file mode 100644 index 6d33eb8f..00000000 --- a/src/cloud_provider/scaleway/databases/mysql.rs +++ /dev/null @@ -1,417 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{ - get_self_hosted_mysql_version, get_supported_version_to_use, print_action, sanitize_name, VersionsNumber, -}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; -use std::collections::HashMap; - -pub struct MySQLScw { - context: Context, - id: String, - action: Action, - name: 
String, - version: VersionsNumber, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl MySQLScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: VersionsNumber, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version, - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - Self::pick_mysql_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn pick_mysql_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - Self::pick_managed_mysql_version(requested_version) - } else { - get_self_hosted_mysql_version(requested_version) - } - } - - fn pick_managed_mysql_version(requested_version: String) -> Result { - // Scaleway supported MySQL versions - // https://api.scaleway.com/rdb/v1/regions/fr-par/database-engines - let mut supported_mysql_versions = HashMap::new(); - - // {"name": "MySQL", "version":"8","end_of_life":"2026-04-01T00:00:00Z"} - supported_mysql_versions.insert("8".to_string(), "8".to_string()); - supported_mysql_versions.insert("8.0".to_string(), "8.0".to_string()); - - get_supported_version_to_use("RDB MySQL", supported_mysql_versions, requested_version) - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "mysql" - } -} - -impl 
StatefulService for MySQLScw { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for MySQLScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for MySQLScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::MySQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("mysql", self.name()) - } - - fn version(&self) -> String { - self.version.to_string() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - 
kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = &self - .matching_correct_version(self.is_managed_service(), event_details)? - .matched_version(); - context.insert("version_major", &version.to_major_version_string()); - context.insert("version", &version.to_string()); // Scaleway needs to have major version only - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_name", &self.sanitized_name()); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - - context.insert("publicly_accessible", &self.options.publicly_accessible); - context.insert("activate_high_availability", &self.options.activate_high_availability); - context.insert("activate_backups", &self.options.activate_backups); - - 
context.insert("delete_automated_backups", &self.context().is_test_cluster()); - context.insert("skip_final_snapshot", &self.context().is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for MySQLScw {} - -impl Helm for MySQLScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("mysql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/mysql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/scaleway/chart_values/mysql", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for MySQLScw { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/mysql", self.context.lib_root_dir()) - } -} - -impl Create for MySQLScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for MySQLScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for MySQLScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn 
on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for MySQLScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/scaleway/databases/postgresql.rs b/src/cloud_provider/scaleway/databases/postgresql.rs deleted file mode 100644 index d101ecbc..00000000 --- a/src/cloud_provider/scaleway/databases/postgresql.rs +++ /dev/null @@ -1,426 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{ - get_self_hosted_postgres_version, get_supported_version_to_use, print_action, sanitize_name, VersionsNumber, -}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::{CommandError, EngineError}; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; -use std::collections::HashMap; - -pub struct PostgresScw { - context: Context, - id: String, - action: Action, - name: String, - version: VersionsNumber, - 
fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl PostgresScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: VersionsNumber, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version, - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version( - &self, - is_managed_services: bool, - event_details: EventDetails, - ) -> Result { - check_service_version( - Self::pick_postgres_version(self.version(), is_managed_services), - self, - event_details, - self.logger(), - ) - } - - fn pick_postgres_version(requested_version: String, is_managed_service: bool) -> Result { - if is_managed_service { - Self::pick_managed_postgres_version(requested_version) - } else { - get_self_hosted_postgres_version(requested_version) - } - } - - fn pick_managed_postgres_version(requested_version: String) -> Result { - // Scaleway supported postgres versions - // https://api.scaleway.com/rdb/v1/regions/fr-par/database-engines - let mut supported_postgres_versions = HashMap::new(); - - // {"name":"PostgreSQL","version":"13","end_of_life":"2025-11-13T00:00:00Z"} - // {"name":"PostgreSQL","version":"12","end_of_life":"2024-11-14T00:00:00Z"} - // {"name":"PostgreSQL","version":"11","end_of_life":"2023-11-09T00:00:00Z"} - // {"name":"PostgreSQL","version":"10","end_of_life":"2022-11-10T00:00:00Z"} - supported_postgres_versions.insert("10".to_string(), "10".to_string()); - supported_postgres_versions.insert("10.0".to_string(), 
"10.0".to_string()); - supported_postgres_versions.insert("11".to_string(), "11".to_string()); - supported_postgres_versions.insert("11.0".to_string(), "11.0".to_string()); - supported_postgres_versions.insert("12".to_string(), "12".to_string()); - supported_postgres_versions.insert("12.0".to_string(), "12.0".to_string()); - supported_postgres_versions.insert("13".to_string(), "13".to_string()); - supported_postgres_versions.insert("13.0".to_string(), "13.0".to_string()); - - get_supported_version_to_use("RDB postgres", supported_postgres_versions, requested_version) - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "postgresql" - } -} - -impl StatefulService for PostgresScw { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl ToTransmitter for PostgresScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for PostgresScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::PostgreSQL(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("postgresql", self.name()) - } - - fn version(&self) -> String { - self.version.to_string() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn 
max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - context.insert("namespace", environment.namespace()); - - let version = &self - .matching_correct_version(self.is_managed_service(), event_details)? - .matched_version(); - context.insert("version_major", &version.to_major_version_string()); - context.insert("version", &version.to_string()); // Scaleway needs to have major version only - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_name", self.sanitized_name().as_str()); - context.insert("database_db_name", self.name()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", 
&self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - - context.insert("publicly_accessible", &self.options.publicly_accessible); - context.insert("activate_high_availability", &self.options.activate_high_availability); - context.insert("activate_backups", &self.options.activate_backups); - context.insert("delete_automated_backups", &self.context().is_test_cluster()); - context.insert("skip_final_snapshot", &self.context().is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for PostgresScw {} - -impl Helm for PostgresScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("postgresql-{}", self.id()), 50) - } - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/postgresql", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/scaleway/chart_values/postgresql", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for PostgresScw { - fn terraform_common_resource_dir_path(&self) -> String 
{ - format!("{}/scaleway/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/postgresql", self.context.lib_root_dir()) - } -} - -impl Create for PostgresScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Pause for PostgresScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), 
EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - Ok(()) - } -} - -impl Delete for PostgresScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for PostgresScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/scaleway/databases/redis.rs b/src/cloud_provider/scaleway/databases/redis.rs deleted file mode 100644 index 528152da..00000000 --- a/src/cloud_provider/scaleway/databases/redis.rs +++ /dev/null @@ -1,382 +0,0 @@ -use tera::Context as TeraContext; - -use crate::cloud_provider::service::{ - check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, Database, DatabaseOptions, - DatabaseType, Delete, Helm, Pause, 
Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, -}; -use crate::cloud_provider::utilities::{get_self_hosted_redis_version, print_action, sanitize_name}; -use crate::cloud_provider::DeploymentTarget; -use crate::cmd::helm::Timeout; -use crate::cmd::kubectl; -use crate::errors::EngineError; -use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::DatabaseMode::MANAGED; -use crate::io_models::{Context, Listen, Listener, Listeners}; -use crate::logger::Logger; -use ::function_name::named; - -pub struct RedisScw { - context: Context, - id: String, - action: Action, - name: String, - version: String, - fqdn: String, - fqdn_id: String, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: String, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, -} - -impl RedisScw { - pub fn new( - context: Context, - id: &str, - action: Action, - name: &str, - version: &str, - fqdn: &str, - fqdn_id: &str, - total_cpus: String, - total_ram_in_mib: u32, - database_instance_type: &str, - options: DatabaseOptions, - listeners: Listeners, - logger: Box, - ) -> Self { - Self { - context, - action, - id: id.to_string(), - name: name.to_string(), - version: version.to_string(), - fqdn: fqdn.to_string(), - fqdn_id: fqdn_id.to_string(), - total_cpus, - total_ram_in_mib, - database_instance_type: database_instance_type.to_string(), - options, - listeners, - logger, - } - } - - fn matching_correct_version(&self, event_details: EventDetails) -> Result { - check_service_version( - get_self_hosted_redis_version(self.version()), - self, - event_details, - self.logger(), - ) - } - - fn cloud_provider_name(&self) -> &str { - "scaleway" - } - - fn struct_name(&self) -> &str { - "redis" - } -} - -impl StatefulService for RedisScw { - fn as_stateful_service(&self) -> &dyn StatefulService { - self - } - - fn is_managed_service(&self) -> bool { - self.options.mode == MANAGED - } -} - -impl 
ToTransmitter for RedisScw { - fn to_transmitter(&self) -> Transmitter { - Transmitter::Database(self.id().to_string(), self.service_type().to_string(), self.name().to_string()) - } -} - -impl Service for RedisScw { - fn context(&self) -> &Context { - &self.context - } - - fn service_type(&self) -> ServiceType { - ServiceType::Database(DatabaseType::Redis(&self.options)) - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn sanitized_name(&self) -> String { - sanitize_name("redis", self.name()) - } - - fn version(&self) -> String { - self.version.clone() - } - - fn action(&self) -> &Action { - &self.action - } - - fn private_port(&self) -> Option { - Some(self.options.port) - } - - fn start_timeout(&self) -> Timeout { - Timeout::Value(600) - } - - fn total_cpus(&self) -> String { - self.total_cpus.to_string() - } - - fn cpu_burst(&self) -> String { - unimplemented!() - } - - fn total_ram_in_mib(&self) -> u32 { - self.total_ram_in_mib - } - - fn min_instances(&self) -> u32 { - 1 - } - - fn max_instances(&self) -> u32 { - 1 - } - - fn publicly_accessible(&self) -> bool { - self.options.publicly_accessible - } - - fn tera_context(&self, target: &DeploymentTarget) -> Result { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let kubernetes = target.kubernetes; - let environment = target.environment; - - let mut context = default_tera_context(self, kubernetes, environment); - - // we need the kubernetes config file to store tfstates file in kube secrets - let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; - context.insert("kubeconfig_path", &kube_config_file_path); - - kubectl::kubectl_exec_create_namespace_without_labels( - environment.namespace(), - kube_config_file_path.as_str(), - kubernetes.cloud_provider().credentials_environment_variables(), - ); - - let version = self - .matching_correct_version(event_details)? 
- .matched_version() - .to_string(); - - context.insert("namespace", environment.namespace()); - context.insert("version", &version); - - for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { - context.insert(k, v); - } - - context.insert("kubernetes_cluster_id", kubernetes.id()); - context.insert("kubernetes_cluster_name", kubernetes.name()); - - context.insert("fqdn_id", self.fqdn_id.as_str()); - context.insert("fqdn", self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str()); - context.insert("service_name", self.fqdn_id.as_str()); - context.insert("database_login", self.options.login.as_str()); - context.insert("database_password", self.options.password.as_str()); - context.insert("database_port", &self.private_port()); - context.insert("database_disk_size_in_gib", &self.options.disk_size_in_gib); - context.insert("database_instance_type", &self.database_instance_type); - context.insert("database_disk_type", &self.options.database_disk_type); - context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); - context.insert("database_total_cpus", &self.total_cpus); - context.insert("database_fqdn", &self.options.host.as_str()); - context.insert("database_id", &self.id()); - context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); - context.insert("tfstate_name", &get_tfstate_name(self)); - context.insert("publicly_accessible", &self.options.publicly_accessible); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - Ok(context) - } - - fn logger(&self) -> &dyn Logger { - &*self.logger - } - - fn selector(&self) -> Option { - Some(format!("app={}", self.sanitized_name())) - } -} - -impl Database for RedisScw {} - -impl Helm for RedisScw { - fn helm_selector(&self) -> Option { - self.selector() - } - - fn helm_release_name(&self) -> String { - crate::string::cut(format!("redis-{}", self.id()), 50) - 
} - - fn helm_chart_dir(&self) -> String { - format!("{}/common/services/redis", self.context.lib_root_dir()) - } - - fn helm_chart_values_dir(&self) -> String { - format!("{}/scaleway/chart_values/redis", self.context.lib_root_dir()) - } - - fn helm_chart_external_name_service_dir(&self) -> String { - format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) - } -} - -impl Terraform for RedisScw { - fn terraform_common_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/common", self.context.lib_root_dir()) - } - - fn terraform_resource_dir_path(&self) -> String { - format!("{}/scaleway/services/redis", self.context.lib_root_dir()) - } -} - -impl Create for RedisScw { - #[named] - fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || { - deploy_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_create_check(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - self.check_domains(self.listeners.clone(), vec![self.fqdn.as_str()], event_details, self.logger()) - } - - #[named] - fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Pause for RedisScw { - #[named] - fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || { - scale_down_database(target, self, 0) - }) - } - - fn on_pause_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Delete for RedisScw { - #[named] - fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - - send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || { - delete_stateful_service(target, self, event_details.clone(), self.logger()) - }) - } - - fn on_delete_check(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for RedisScw { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} diff --git a/src/cloud_provider/scaleway/mod.rs b/src/cloud_provider/scaleway/mod.rs index 
7311f342..cae55f82 100644 --- a/src/cloud_provider/scaleway/mod.rs +++ b/src/cloud_provider/scaleway/mod.rs @@ -6,7 +6,6 @@ use crate::constants::{SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWA use crate::events::{EventDetails, Stage, ToTransmitter, Transmitter}; use crate::io_models::{Context, Listen, Listener, Listeners, QoveryIdentifier}; -pub mod databases; pub mod kubernetes; pub struct Scaleway { diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 2d6da1a1..eed91974 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -10,7 +10,7 @@ use tera::Context as TeraContext; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::ChartInfo; use crate::cloud_provider::kubernetes::Kubernetes; -use crate::cloud_provider::utilities::{check_domain_for, VersionsNumber}; +use crate::cloud_provider::utilities::check_domain_for; use crate::cloud_provider::DeploymentTarget; use crate::cmd; use crate::cmd::helm; @@ -26,6 +26,7 @@ use crate::io_models::{ QoveryIdentifier, }; use crate::logger::Logger; +use crate::models::types::VersionsNumber; pub trait Service: ToTransmitter { fn context(&self) -> &Context; @@ -250,32 +251,32 @@ pub struct DatabaseOptions { } #[derive(Eq, PartialEq)] -pub enum DatabaseType<'a> { - PostgreSQL(&'a DatabaseOptions), - MongoDB(&'a DatabaseOptions), - MySQL(&'a DatabaseOptions), - Redis(&'a DatabaseOptions), +pub enum DatabaseType { + PostgreSQL, + MongoDB, + MySQL, + Redis, } -impl<'a> ToString for DatabaseType<'a> { +impl ToString for DatabaseType { fn to_string(&self) -> String { match self { - DatabaseType::PostgreSQL(_) => "PostgreSQL".to_string(), - DatabaseType::MongoDB(_) => "MongoDB".to_string(), - DatabaseType::MySQL(_) => "MySQL".to_string(), - DatabaseType::Redis(_) => "Redis".to_string(), + DatabaseType::PostgreSQL => "PostgreSQL".to_string(), + DatabaseType::MongoDB => "MongoDB".to_string(), + DatabaseType::MySQL => "MySQL".to_string(), + 
DatabaseType::Redis => "Redis".to_string(), } } } #[derive(Eq, PartialEq)] -pub enum ServiceType<'a> { +pub enum ServiceType { Application, - Database(DatabaseType<'a>), + Database(DatabaseType), Router, } -impl<'a> ServiceType<'a> { +impl ServiceType { pub fn name(&self) -> String { match self { ServiceType::Application => "Application".to_string(), @@ -285,7 +286,7 @@ impl<'a> ServiceType<'a> { } } -impl<'a> ToString for ServiceType<'a> { +impl<'a> ToString for ServiceType { fn to_string(&self) -> String { self.name() } diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 4c51ac0c..5a0949c0 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -1,8 +1,6 @@ #![allow(clippy::field_reassign_with_default)] -use std::collections::HashMap; - -use crate::errors::{CommandError, EngineError}; +use crate::errors::EngineError; use crate::events::{EngineEvent, EventDetails, EventMessage}; use crate::io_models::{Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope}; use crate::logger::Logger; @@ -12,306 +10,10 @@ use core::result::Result; use core::result::Result::{Err, Ok}; use retry::delay::Fixed; use retry::OperationResult; -use serde::{Deserialize, Serialize}; -use std::fmt; -use std::fmt::Write; -use std::str::FromStr; use trust_dns_resolver::config::*; use trust_dns_resolver::proto::rr::{RData, RecordType}; use trust_dns_resolver::Resolver; -pub fn get_self_hosted_postgres_version(requested_version: String) -> Result { - let mut supported_postgres_versions = HashMap::new(); - - // https://hub.docker.com/r/bitnami/postgresql/tags?page=1&ordering=last_updated - - // v10 - let v10 = generate_supported_version(10, 1, 16, Some(0), Some(0), None); - supported_postgres_versions.extend(v10); - - // v11 - let v11 = generate_supported_version(11, 1, 11, Some(0), Some(0), None); - supported_postgres_versions.extend(v11); - - // v12 - let v12 = generate_supported_version(12, 2, 8, Some(0), Some(0), 
None); - supported_postgres_versions.extend(v12); - - // v13 - let v13 = generate_supported_version(13, 1, 4, Some(0), Some(0), None); - supported_postgres_versions.extend(v13); - - get_supported_version_to_use("Postgresql", supported_postgres_versions, requested_version) -} - -pub fn get_self_hosted_mysql_version(requested_version: String) -> Result { - let mut supported_mysql_versions = HashMap::new(); - // https://hub.docker.com/r/bitnami/mysql/tags?page=1&ordering=last_updated - - // v5.7 - let v57 = generate_supported_version(5, 7, 7, Some(16), Some(34), None); - supported_mysql_versions.extend(v57); - - // v8 - let v8 = generate_supported_version(8, 0, 0, Some(11), Some(24), None); - supported_mysql_versions.extend(v8); - - get_supported_version_to_use("MySQL", supported_mysql_versions, requested_version) -} - -pub fn get_self_hosted_mongodb_version(requested_version: String) -> Result { - let mut supported_mongodb_versions = HashMap::new(); - - // https://hub.docker.com/r/bitnami/mongodb/tags?page=1&ordering=last_updated - - // v3.6 - let mongo_version = generate_supported_version(3, 6, 6, Some(0), Some(22), None); - supported_mongodb_versions.extend(mongo_version); - - // v4.0 - let mongo_version = generate_supported_version(4, 0, 0, Some(0), Some(23), None); - supported_mongodb_versions.extend(mongo_version); - - // v4.2 - let mongo_version = generate_supported_version(4, 2, 2, Some(0), Some(12), None); - supported_mongodb_versions.extend(mongo_version); - - // v4.4 - let mongo_version = generate_supported_version(4, 4, 4, Some(0), Some(4), None); - supported_mongodb_versions.extend(mongo_version); - - get_supported_version_to_use("MongoDB", supported_mongodb_versions, requested_version) -} - -pub fn get_self_hosted_redis_version(requested_version: String) -> Result { - let mut supported_redis_versions = HashMap::with_capacity(4); - // https://hub.docker.com/r/bitnami/redis/tags?page=1&ordering=last_updated - - 
supported_redis_versions.insert("6".to_string(), "6.0.9".to_string()); - supported_redis_versions.insert("6.0".to_string(), "6.0.9".to_string()); - supported_redis_versions.insert("5".to_string(), "5.0.10".to_string()); - supported_redis_versions.insert("5.0".to_string(), "5.0.10".to_string()); - - get_supported_version_to_use("Redis", supported_redis_versions, requested_version) -} - -pub fn get_supported_version_to_use( - database_name: &str, - all_supported_versions: HashMap, - version_to_check: String, -) -> Result { - let version = VersionsNumber::from_str(version_to_check.as_str())?; - - // if a patch version is required - if version.patch.is_some() { - return match all_supported_versions.get(&format!( - "{}.{}.{}", - version.major, - version.minor.unwrap(), - version.patch.unwrap() - )) { - Some(version) => Ok(version.to_string()), - None => { - return Err(CommandError::new_from_safe_message(format!( - "{} {} version is not supported", - database_name, version_to_check - ))); - } - }; - } - - // if a minor version is required - if version.minor.is_some() { - return match all_supported_versions.get(&format!("{}.{}", version.major, version.minor.unwrap())) { - Some(version) => Ok(version.to_string()), - None => { - return Err(CommandError::new_from_safe_message(format!( - "{} {} version is not supported", - database_name, version_to_check - ))); - } - }; - }; - - // if only a major version is required - match all_supported_versions.get(&version.major) { - Some(version) => Ok(version.to_string()), - None => { - return Err(CommandError::new_from_safe_message(format!( - "{} {} version is not supported", - database_name, version_to_check - ))); - } - } -} - -// Ease the support of multiple versions by range -pub fn generate_supported_version( - major: i32, - minor_min: i32, - minor_max: i32, - update_min: Option, - update_max: Option, - suffix_version: Option, -) -> HashMap { - let mut supported_versions = HashMap::new(); - let latest_major_version; - - // blank 
suffix if not requested - let suffix = match suffix_version { - Some(suffix) => suffix, - None => "".to_string(), - }; - - let _ = match update_min { - // manage minor with updates - Some(_) => { - latest_major_version = format!("{}.{}.{}{}", major, minor_max, update_max.unwrap(), suffix); - - if minor_min == minor_max { - // add short minor format targeting latest version - supported_versions.insert(format!("{}.{}", major, minor_max), latest_major_version.clone()); - if update_min.unwrap() == update_max.unwrap() { - let version = format!("{}.{}.{}", major, minor_min, update_min.unwrap()); - supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); - } else { - for update in update_min.unwrap()..update_max.unwrap() + 1 { - let version = format!("{}.{}.{}", major, minor_min, update); - supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); - } - } - } else { - for minor in minor_min..minor_max + 1 { - // add short minor format targeting latest version - supported_versions.insert( - format!("{}.{}", major, minor), - format!("{}.{}.{}", major, minor, update_max.unwrap()), - ); - if update_min.unwrap() == update_max.unwrap() { - let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); - supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); - } else { - for update in update_min.unwrap()..update_max.unwrap() + 1 { - let version = format!("{}.{}.{}", major, minor, update); - supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); - } - } - } - } - } - // manage minor without updates - None => { - latest_major_version = format!("{}.{}{}", major, minor_max, suffix); - for minor in minor_min..minor_max + 1 { - let version = format!("{}.{}", major, minor); - supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); - } - } - }; - - // default major + major.minor supported version - supported_versions.insert(major.to_string(), latest_major_version); - - 
supported_versions -} - -// unfortunately some proposed versions are not SemVer like Elasticache (6.x) -// this is why we need ot have our own structure -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] -pub struct VersionsNumber { - pub(crate) major: String, - pub(crate) minor: Option, - pub(crate) patch: Option, - pub(crate) suffix: Option, -} - -impl VersionsNumber { - pub fn new(major: String, minor: Option, patch: Option, suffix: Option) -> Self { - VersionsNumber { - major, - minor, - patch, - suffix, - } - } - - pub fn to_major_version_string(&self) -> String { - self.major.clone() - } - - pub fn to_major_minor_version_string(&self, default_minor: &str) -> String { - let test = format!( - "{}.{}", - self.major.clone(), - self.minor.as_ref().unwrap_or(&default_minor.to_string()) - ); - - test - } -} - -impl FromStr for VersionsNumber { - type Err = CommandError; - - fn from_str(version: &str) -> Result { - if version.trim() == "" { - return Err(CommandError::new_from_safe_message("version cannot be empty".to_string())); - } - - let mut version_split = version.splitn(4, '.').map(|v| v.trim()); - - let major = match version_split.next() { - Some(major) => { - let major = major.to_string(); - major.replace('v', "") - } - None => { - return Err(CommandError::new_from_safe_message(format!( - "please check the version you've sent ({}), it can't be checked", - version - ))) - } - }; - - let minor = version_split.next().map(|minor| { - let minor = minor.to_string(); - minor.replace('+', "") - }); - - let patch = version_split.next().map(|patch| patch.to_string()); - - let suffix = version_split.next().map(|suffix| suffix.to_string()); - - // TODO(benjaminch): Handle properly the case where versions are empty - // eq. 
1..2 - - Ok(VersionsNumber::new(major, minor, patch, suffix)) - } -} - -impl fmt::Display for VersionsNumber { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(&self.major)?; - - if let Some(minor) = &self.minor { - f.write_char('.')?; - f.write_str(minor)?; - } - - if let Some(patch) = &self.patch { - f.write_char('.')?; - f.write_str(patch)?; - } - - if let Some(suffix) = &self.suffix { - f.write_char('.')?; - f.write_str(suffix)?; - } - - Ok(()) - } -} - fn dns_resolvers() -> Vec { let mut resolver_options = ResolverOpts::default(); @@ -559,8 +261,9 @@ pub fn print_action( #[cfg(test)] mod tests { - use crate::cloud_provider::utilities::{dns_resolvers, get_cname_record_value, VersionsNumber}; + use crate::cloud_provider::utilities::{dns_resolvers, get_cname_record_value}; use crate::errors::CommandError; + use crate::models::types::VersionsNumber; use std::str::FromStr; #[test] diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 2831c945..a5708a5b 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -3,7 +3,6 @@ pub mod io; extern crate url; use crate::build_platform::BuildError; -use crate::cloud_provider::utilities::VersionsNumber; use crate::cmd; use crate::cmd::docker::DockerError; use crate::cmd::helm::HelmError; @@ -11,6 +10,7 @@ use crate::container_registry::errors::ContainerRegistryError; use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope}; use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; use crate::io_models::QoveryIdentifier; +use crate::models::types::VersionsNumber; use crate::object_storage::errors::ObjectStorageError; use std::fmt::{Display, Formatter}; use thiserror::Error; diff --git a/src/io_models.rs b/src/io_models.rs index 4f27cf82..9e729919 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -14,34 +14,21 @@ use serde::{Deserialize, Serialize}; use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; -use 
crate::cloud_provider::aws::databases::mongodb::MongoDbAws; -use crate::cloud_provider::aws::databases::mysql::MySQLAws; -use crate::cloud_provider::aws::databases::postgresql::PostgreSQLAws; -use crate::cloud_provider::aws::databases::redis::RedisAws; -use crate::cloud_provider::digitalocean::databases::mongodb::MongoDo; -use crate::cloud_provider::digitalocean::databases::mysql::MySQLDo; -use crate::cloud_provider::digitalocean::databases::postgresql::PostgresDo; -use crate::cloud_provider::digitalocean::databases::redis::RedisDo; use crate::cloud_provider::environment::Environment; -use crate::cloud_provider::scaleway::databases::mongodb::MongoDbScw; -use crate::cloud_provider::scaleway::databases::mysql::MySQLScw; -use crate::cloud_provider::scaleway::databases::postgresql::PostgresScw; -use crate::cloud_provider::scaleway::databases::redis::RedisScw; use crate::cloud_provider::service::{DatabaseOptions, RouterService}; -use crate::cloud_provider::utilities::VersionsNumber; use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; -use crate::errors::ErrorMessageVerbosity; use crate::logger::Logger; use crate::models; use crate::models::application::{ApplicationError, ApplicationService}; use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; +use crate::models::database::{Container, Managed, MongoDB, MySQL, PostgresSQL, Redis}; use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; use crate::models::router::RouterError; use crate::models::scaleway::{ScwAppExtraSettings, ScwRouterExtraSettings, ScwStorageType}; -use crate::models::types::{AWS, DO, SCW}; +use crate::models::types::{VersionsNumber, AWS, DO, SCW}; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -651,265 +638,415 @@ impl Database { let listeners = cloud_provider.listeners().clone(); - match 
cloud_provider.kind() { - CPKind::Aws => match self.kind { - DatabaseKind::Postgresql => { - let db = Box::new(PostgreSQLAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + match (cloud_provider.kind(), &self.kind, &self.mode) { + (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - DatabaseKind::Mysql => { - let db = Box::new(MySQLAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - 
DatabaseKind::Mongodb => { - let db = Box::new(MongoDbAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } - Some(db) - } - DatabaseKind::Redis => { - let db = Box::new(RedisAws::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - }, - CPKind::Do => match self.kind { - DatabaseKind::Postgresql => { - let db = Box::new(PostgresDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + 
VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - DatabaseKind::Mysql => { - let db = Box::new(MySQLDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - DatabaseKind::Redis => { - let db = Box::new(RedisDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + 
self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - DatabaseKind::Mongodb => { - let db = Box::new(MongoDo::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - }, - CPKind::Scw => match self.kind { - DatabaseKind::Postgresql => match VersionsNumber::from_str(self.version.as_str()) { - Ok(v) => { - let db = Box::new(PostgresScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); + Some(Box::new(db)) + } + (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + 
self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - Err(e) => { - error!( - "{}", - format!( - "error while parsing postgres version, error: {}", - e.message(ErrorMessageVerbosity::FullDetails) - ) - ); - None - } - }, - DatabaseKind::Mysql => match VersionsNumber::from_str(self.version.as_str()) { - Ok(v) => { - let db = Box::new(MySQLScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - v, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); + Some(Box::new(db)) + } - Some(db) - } - Err(e) => { - error!( - "{}", - format!( - "error while parsing mysql version, error: {}", - e.message(ErrorMessageVerbosity::FullDetails) - ) - ); - None - } - }, - DatabaseKind::Redis => { - let db = Box::new(RedisScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger.clone(), - )); + (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => None, + (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, 
+ ) + .unwrap(); - Some(db) - } - DatabaseKind::Mongodb => { - let db = Box::new(MongoDbScw::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.version.as_str(), - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options, - listeners, - logger, - )); + Some(Box::new(db)) + } + (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::MANAGED) => None, + (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); - Some(db) - } - }, + Some(Box::new(db)) + } + (CPKind::Do, DatabaseKind::Redis, DatabaseMode::MANAGED) => None, + (CPKind::Do, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + + Some(Box::new(db)) + } + (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => None, + (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + 
VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + + Some(Box::new(db)) + } + + (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + Some(Box::new(db)) + } + (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + Some(Box::new(db)) + } + (CPKind::Scw, DatabaseKind::Mysql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + 
Some(Box::new(db)) + } + (CPKind::Scw, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + + Some(Box::new(db)) + } + (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::MANAGED) => { + // Not Implemented + None + } + (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + + Some(Box::new(db)) + } + (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { + // Not Implemented + None + } + (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + VersionsNumber::from_str(self.version.as_str()).ok()?, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + ) + .unwrap(); + + Some(Box::new(db)) + } } } } diff --git a/src/models/application.rs b/src/models/application.rs index 296821bf..b40a061e 100644 
--- a/src/models/application.rs +++ b/src/models/application.rs @@ -26,23 +26,23 @@ pub enum ApplicationError { pub struct Application { _marker: PhantomData, - pub(crate) context: Context, - pub(crate) id: String, - pub(crate) action: Action, - pub(crate) name: String, - pub(crate) ports: Vec, - pub(crate) total_cpus: String, - pub(crate) cpu_burst: String, - pub(crate) total_ram_in_mib: u32, - pub(crate) min_instances: u32, - pub(crate) max_instances: u32, - pub(crate) start_timeout_in_seconds: u32, - pub(crate) build: Build, - pub(crate) storage: Vec>, - pub(crate) environment_variables: Vec, - pub(crate) listeners: Listeners, - pub(crate) logger: Box, - pub(crate) _extra_settings: T::AppExtraSettings, + pub(super) context: Context, + pub(super) id: String, + pub(super) action: Action, + pub(super) name: String, + pub(super) ports: Vec, + pub(super) total_cpus: String, + pub(super) cpu_burst: String, + pub(super) total_ram_in_mib: u32, + pub(super) min_instances: u32, + pub(super) max_instances: u32, + pub(super) start_timeout_in_seconds: u32, + pub(super) build: Build, + pub(super) storage: Vec>, + pub(super) environment_variables: Vec, + pub(super) listeners: Listeners, + pub(super) logger: Box, + pub(super) _extra_settings: T::AppExtraSettings, } // Here we define the common behavior among all providers @@ -294,7 +294,7 @@ impl Helm for Application { format!( "{}/{}/charts/q-application", self.context.lib_root_dir(), - T::helm_directory_name(), + T::lib_directory_name(), ) } diff --git a/src/models/aws/database.rs b/src/models/aws/database.rs new file mode 100644 index 00000000..a5237851 --- /dev/null +++ b/src/models/aws/database.rs @@ -0,0 +1,326 @@ +use crate::cloud_provider::service::{ + check_service_version, default_tera_context, get_tfstate_name, get_tfstate_suffix, DatabaseOptions, Service, + ServiceVersionCheckResult, +}; +use crate::cloud_provider::{service, DeploymentTarget}; +use crate::cmd::kubectl; +use crate::errors::EngineError; +use 
crate::events::{EnvironmentStep, EventDetails, Stage}; +use crate::models::aws::database_utils::{ + get_managed_mongodb_version, get_managed_mysql_version, get_managed_postgres_version, get_managed_redis_version, +}; +use crate::models::database::{ + Container, Database, DatabaseMode, DatabaseType, Managed, MongoDB, MySQL, PostgresSQL, Redis, +}; + +use crate::models::types::{ToTeraContext, AWS}; +use tera::Context as TeraContext; + +///////////////////////////////////////////////////////////////// +// CONTAINER +impl DatabaseType for PostgresSQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "PostgresSQL" + } + fn lib_directory_name() -> &'static str { + "postgresql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::PostgreSQL + } +} + +impl DatabaseType for MySQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MySQL" + } + fn lib_directory_name() -> &'static str { + "mysql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::MySQL + } +} + +impl DatabaseType for Redis { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "Redis" + } + fn lib_directory_name() -> &'static str { + "redis" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::Redis + } +} + +impl DatabaseType for MongoDB { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "Redis" + } + + fn lib_directory_name() -> &'static str { + "mongodb" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MongoDB + } +} + +///////////////////////////////////////////////////////////////// +// MANAGED +impl DatabaseType for PostgresSQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "Postgres RDS" + } + fn lib_directory_name() -> &'static str { + "postgresql" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::PostgreSQL + } +} + +impl DatabaseType 
for MySQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MySQL RDS" + } + fn lib_directory_name() -> &'static str { + "mysql" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MySQL + } +} + +impl DatabaseType for Redis { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "ElasticCache" + } + fn lib_directory_name() -> &'static str { + "redis" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::Redis + } +} + +impl DatabaseType for MongoDB { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "DocumentDB" + } + + fn lib_directory_name() -> &'static str { + "mongodb" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MongoDB + } +} + +impl> Database +where + Database: Service, +{ + fn get_version_aws_managed(&self, event_details: EventDetails) -> Result { + let fn_version = match T::db_type() { + service::DatabaseType::PostgreSQL => get_managed_postgres_version, + service::DatabaseType::MongoDB => get_managed_mongodb_version, + service::DatabaseType::MySQL => get_managed_mysql_version, + service::DatabaseType::Redis => get_managed_redis_version, + }; + + check_service_version(fn_version(self.version.to_string()), self, event_details, self.logger()) + } + + fn to_tera_context_for_aws_managed( + &self, + target: &DeploymentTarget, + options: &DatabaseOptions, + ) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + + // we need the kubernetes config file to store tfstates file in kube secrets + let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; + context.insert("kubeconfig_path", &kube_config_file_path); + + kubectl::kubectl_exec_create_namespace_without_labels( + 
environment.namespace(), + kube_config_file_path.as_str(), + kubernetes.cloud_provider().credentials_environment_variables(), + ); + + context.insert("namespace", environment.namespace()); + + let version = self + .get_version_aws_managed(event_details)? + .matched_version() + .to_string(); + context.insert("version", &version); + + // Specific to mysql + if T::db_type() == service::DatabaseType::MySQL { + context.insert( + "parameter_group_family", + &format!( + "mysql{}.{}", + self.version.major, + self.version.minor.as_deref().unwrap_or_default() + ), + ); + } + + // Specific for redis + if T::db_type() == service::DatabaseType::Redis { + let parameter_group_name = if self.version.major == "5" { + "default.redis5.0" + } else if self.version.major == "6" { + "default.redis6.x" + } else { + "redis.unknown" + }; + + context.insert("database_elasticache_parameter_group_name", parameter_group_name); + } + + for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { + context.insert(k, v); + } + + context.insert("kubernetes_cluster_id", kubernetes.id()); + context.insert("kubernetes_cluster_name", kubernetes.name()); + context.insert("fqdn_id", self.fqdn_id.as_str()); + context.insert("fqdn", self.fqdn(target, &self.fqdn, Managed::is_managed()).as_str()); + context.insert("service_name", self.fqdn_id.as_str()); + context.insert("database_name", self.sanitized_name().as_str()); + context.insert("database_db_name", self.name()); + context.insert("database_login", options.login.as_str()); + context.insert("database_password", options.password.as_str()); + context.insert("database_port", &self.private_port()); + context.insert("database_disk_size_in_gib", &options.disk_size_in_gib); + context.insert("database_instance_type", &self.database_instance_type); + context.insert("database_disk_type", &options.database_disk_type); + context.insert("encrypt_disk", &options.encrypt_disk); + context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); + 
context.insert("database_total_cpus", &self.total_cpus); + context.insert("database_fqdn", &options.host.as_str()); + context.insert("database_id", &self.id()); + context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); + context.insert("tfstate_name", &get_tfstate_name(self)); + context.insert("skip_final_snapshot", &false); + context.insert("final_snapshot_name", &format!("qovery-{}-final-snap", self.id)); + context.insert("delete_automated_backups", &self.context().is_test_cluster()); + context.insert("publicly_accessible", &options.publicly_accessible); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} + +////////////////////////////////////////////////////////////////////////: +// POSTGRES SQL +impl ToTeraContext for Database +where + PostgresSQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_aws_managed(target, &self.options) + } +} + +impl ToTeraContext for Database +where + PostgresSQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MySQL +impl ToTeraContext for Database +where + MySQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_aws_managed(target, &self.options) + } +} + +impl ToTeraContext for Database +where + MySQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MongoDB +impl ToTeraContext for Database +where + MongoDB: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + 
self.to_tera_context_for_aws_managed(target, &self.options) + } +} + +impl ToTeraContext for Database +where + MongoDB: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// Redis +impl ToTeraContext for Database +where + Redis: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_aws_managed(target, &self.options) + } +} + +impl ToTeraContext for Database +where + Redis: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context_for_container(target, &self.options) + } +} diff --git a/src/models/aws/database_utils.rs b/src/models/aws/database_utils.rs new file mode 100644 index 00000000..22a270df --- /dev/null +++ b/src/models/aws/database_utils.rs @@ -0,0 +1,198 @@ +use crate::errors::CommandError; +use crate::models::database_utils::{generate_supported_version, get_supported_version_to_use}; +use std::collections::HashMap; + +pub(super) fn get_managed_mysql_version(requested_version: String) -> Result { + let mut supported_mysql_versions = HashMap::new(); + // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt + + // v5.7 + let mut v57 = generate_supported_version(5, 7, 7, Some(16), Some(34), None); + v57.remove("5.7.32"); + v57.remove("5.7.29"); + v57.remove("5.7.27"); + v57.remove("5.7.20"); + v57.remove("5.7.18"); + supported_mysql_versions.extend(v57); + + // v8 + let mut v8 = generate_supported_version(8, 0, 0, Some(11), Some(26), None); + v8.remove("8.0.24"); + v8.remove("8.0.22"); + v8.remove("8.0.18"); + v8.remove("8.0.14"); + v8.remove("8.0.12"); + supported_mysql_versions.extend(v8); + + get_supported_version_to_use("RDS MySQL", supported_mysql_versions, requested_version) +} + +pub(super) fn 
get_managed_mongodb_version(requested_version: String) -> Result { + let mut supported_mongodb_versions = HashMap::new(); + + // v3.6.0 + let mongo_version = generate_supported_version(3, 6, 6, Some(0), Some(0), None); + supported_mongodb_versions.extend(mongo_version); + + // v4.0.0 + let mongo_version = generate_supported_version(4, 0, 0, Some(0), Some(0), None); + supported_mongodb_versions.extend(mongo_version); + + get_supported_version_to_use("DocumentDB", supported_mongodb_versions, requested_version) +} + +pub(super) fn get_managed_postgres_version(requested_version: String) -> Result { + let mut supported_postgres_versions = HashMap::new(); + + // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts + + // v10 + let mut v10 = generate_supported_version(10, 1, 18, None, None, None); + v10.remove("10.2"); // non supported version by AWS + v10.remove("10.8"); // non supported version by AWS + supported_postgres_versions.extend(v10); + + // v11 + let mut v11 = generate_supported_version(11, 1, 13, None, None, None); + v11.remove("11.3"); // non supported version by AWS + supported_postgres_versions.extend(v11); + + // v12 + let v12 = generate_supported_version(12, 2, 8, None, None, None); + supported_postgres_versions.extend(v12); + + // v13 + let v13 = generate_supported_version(13, 1, 4, None, None, None); + supported_postgres_versions.extend(v13); + + get_supported_version_to_use("Postgresql", supported_postgres_versions, requested_version) +} + +pub(super) fn get_managed_redis_version(requested_version: String) -> Result { + let mut supported_redis_versions = HashMap::with_capacity(2); + // https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/supported-engine-versions.html + + supported_redis_versions.insert("6".to_string(), "6.x".to_string()); + supported_redis_versions.insert("5".to_string(), "5.0.6".to_string()); + + get_supported_version_to_use("Elasticache", supported_redis_versions, requested_version) 
+} + +#[cfg(test)] +mod tests { + use crate::errors::ErrorMessageVerbosity::SafeOnly; + use crate::models::aws::database_utils::{ + get_managed_mongodb_version, get_managed_mysql_version, get_managed_postgres_version, get_managed_redis_version, + }; + use crate::models::database_utils::{ + get_self_hosted_mongodb_version, get_self_hosted_mysql_version, get_self_hosted_postgres_version, + get_self_hosted_redis_version, + }; + + #[test] + fn check_postgres_version() { + // managed version + assert_eq!(get_managed_postgres_version("12".to_string()).unwrap(), "12.8"); + assert_eq!(get_managed_postgres_version("12.3".to_string()).unwrap(), "12.3"); + assert_eq!( + get_managed_postgres_version("12.3.0".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "Postgresql 12.3.0 version is not supported" + ); + assert_eq!( + get_managed_postgres_version("11.3".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "Postgresql 11.3 version is not supported" + ); + // self-hosted version + assert_eq!(get_self_hosted_postgres_version("12".to_string()).unwrap(), "12.8.0"); + assert_eq!(get_self_hosted_postgres_version("12.8".to_string()).unwrap(), "12.8.0"); + assert_eq!(get_self_hosted_postgres_version("12.3.0".to_string()).unwrap(), "12.3.0"); + assert_eq!( + get_self_hosted_postgres_version("1.0".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "Postgresql 1.0 version is not supported" + ); + } + + #[test] + fn check_redis_version() { + // managed version + assert_eq!(get_managed_redis_version("6".to_string()).unwrap(), "6.x"); + assert_eq!(get_managed_redis_version("5".to_string()).unwrap(), "5.0.6"); + assert_eq!( + get_managed_redis_version("1.0".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "Elasticache 1.0 version is not supported" + ); + + // self-hosted version + assert_eq!(get_self_hosted_redis_version("6".to_string()).unwrap(), "6.0.9"); + assert_eq!(get_self_hosted_redis_version("6.0".to_string()).unwrap(), 
"6.0.9"); + assert_eq!( + get_self_hosted_redis_version("1.0".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "Redis 1.0 version is not supported" + ); + } + + #[test] + fn check_mysql_version() { + // managed version + assert_eq!(get_managed_mysql_version("8".to_string()).unwrap(), "8.0.26"); + assert_eq!(get_managed_mysql_version("8.0".to_string()).unwrap(), "8.0.26"); + assert_eq!(get_managed_mysql_version("8.0.16".to_string()).unwrap(), "8.0.16"); + assert_eq!( + get_managed_mysql_version("8.0.18".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "RDS MySQL 8.0.18 version is not supported" + ); + // self-hosted version + assert_eq!(get_self_hosted_mysql_version("5".to_string()).unwrap(), "5.7.34"); + assert_eq!(get_self_hosted_mysql_version("5.7".to_string()).unwrap(), "5.7.34"); + assert_eq!(get_self_hosted_mysql_version("5.7.31".to_string()).unwrap(), "5.7.31"); + assert_eq!( + get_self_hosted_mysql_version("1.0".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "MySQL 1.0 version is not supported" + ); + } + + #[test] + fn check_mongodb_version() { + // managed version + assert_eq!(get_managed_mongodb_version("4".to_string()).unwrap(), "4.0.0"); + assert_eq!(get_managed_mongodb_version("4.0".to_string()).unwrap(), "4.0.0"); + assert_eq!( + get_managed_mongodb_version("4.4".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "DocumentDB 4.4 version is not supported" + ); + // self-hosted version + assert_eq!(get_self_hosted_mongodb_version("4".to_string()).unwrap(), "4.4.4"); + assert_eq!(get_self_hosted_mongodb_version("4.2".to_string()).unwrap(), "4.2.12"); + assert_eq!( + get_self_hosted_mongodb_version("3.4".to_string()) + .unwrap_err() + .message(SafeOnly) + .as_str(), + "MongoDB 3.4 version is not supported" + ); + } +} diff --git a/src/models/aws/mod.rs b/src/models/aws/mod.rs index e72d16ea..39d68d3e 100644 --- a/src/models/aws/mod.rs +++ b/src/models/aws/mod.rs @@ -1,5 +1,7 @@ -pub mod 
application; -pub mod router; +mod application; +mod database; +mod database_utils; +mod router; use crate::models::types::CloudProvider; use crate::models::types::AWS; @@ -30,7 +32,7 @@ impl CloudProvider for AWS { "Elastic Container Registry" } - fn helm_directory_name() -> &'static str { + fn lib_directory_name() -> &'static str { "aws" } } diff --git a/src/models/database.rs b/src/models/database.rs new file mode 100644 index 00000000..7b5c7052 --- /dev/null +++ b/src/models/database.rs @@ -0,0 +1,508 @@ +use crate::cloud_provider::service::{ + check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, + get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, DatabaseOptions, Delete, Helm, + Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, +}; +use crate::cloud_provider::utilities::{check_domain_for, managed_db_name_sanitizer, print_action}; +use crate::cloud_provider::{service, DeploymentTarget}; +use crate::cmd::helm::Timeout; +use crate::cmd::kubectl; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; +use crate::io_models::{Context, Listen, Listener, Listeners, ListenersHelper}; +use crate::logger::Logger; +use crate::models::database_utils::{ + get_self_hosted_mongodb_version, get_self_hosted_mysql_version, get_self_hosted_postgres_version, + get_self_hosted_redis_version, +}; +use crate::models::types::{CloudProvider, ToTeraContext, VersionsNumber}; +use function_name::named; +use std::borrow::Borrow; +use std::marker::PhantomData; +use tera::Context as TeraContext; + +///////////////////////////////////////////////////////////////// +// Database mode +pub struct Managed {} +pub struct Container {} +pub trait DatabaseMode { + fn is_managed() -> bool; + fn is_container() -> bool { + !Self::is_managed() + } +} + +impl DatabaseMode for Managed { + fn is_managed() -> bool { 
+ true + } +} + +impl DatabaseMode for Container { + fn is_managed() -> bool { + false + } +} + +///////////////////////////////////////////////////////////////// +// Database types, will be only used as a marker +pub struct PostgresSQL {} +pub struct MySQL {} +pub struct MongoDB {} +pub struct Redis {} + +pub trait DatabaseType { + type DatabaseOptions; + + fn short_name() -> &'static str; + fn lib_directory_name() -> &'static str; + fn db_type() -> service::DatabaseType; +} + +#[derive(thiserror::Error, Debug)] +pub enum DatabaseError { + #[error("Application invalid configuration: {0}")] + InvalidConfig(String), +} + +pub struct Database> { + _marker: PhantomData<(C, M, T)>, + pub(super) context: Context, + pub(super) id: String, + pub(super) action: Action, + pub(super) name: String, + pub(super) version: VersionsNumber, + pub(super) fqdn: String, + pub(super) fqdn_id: String, + pub(super) total_cpus: String, + pub(super) total_ram_in_mib: u32, + pub(super) database_instance_type: String, + pub(super) publicly_accessible: bool, + pub(super) private_port: u16, + pub(super) options: T::DatabaseOptions, + pub(super) listeners: Listeners, + pub(super) logger: Box, +} + +impl> Database { + pub fn new( + context: Context, + id: &str, + action: Action, + name: &str, + version: VersionsNumber, + fqdn: &str, + fqdn_id: &str, + total_cpus: String, + total_ram_in_mib: u32, + database_instance_type: &str, + publicly_accessible: bool, + private_port: u16, + options: T::DatabaseOptions, + listeners: Listeners, + logger: Box, + ) -> Result { + // TODO: Implement domain constraint logic + + Ok(Self { + _marker: PhantomData, + context, + action, + id: id.to_string(), + name: name.to_string(), + version, + fqdn: fqdn.to_string(), + fqdn_id: fqdn_id.to_string(), + total_cpus, + total_ram_in_mib, + database_instance_type: database_instance_type.to_string(), + publicly_accessible, + private_port, + options, + listeners, + logger, + }) + } + + fn selector(&self) -> String { + 
format!("databaseId={}", self.id) + } +} + +impl> Terraform for Database { + fn terraform_common_resource_dir_path(&self) -> String { + format!("{}/{}/services/common", self.context.lib_root_dir(), C::lib_directory_name()) + } + + fn terraform_resource_dir_path(&self) -> String { + format!( + "{}/{}/services/{}", + self.context.lib_root_dir(), + C::lib_directory_name(), + T::lib_directory_name() + ) + } +} + +impl> Listen for Database { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} + +impl> ToTransmitter for Database { + fn to_transmitter(&self) -> Transmitter { + Transmitter::Database(self.id.to_string(), T::short_name().to_string(), self.name.to_string()) + } +} + +impl> Service for Database +where + Database: ToTeraContext, +{ + fn context(&self) -> &Context { + &self.context + } + + fn service_type(&self) -> ServiceType { + ServiceType::Database(T::db_type()) + } + + fn id(&self) -> &str { + &self.id + } + + fn name(&self) -> &str { + &self.name + } + + fn sanitized_name(&self) -> String { + // FIXME: specific case only for aws ;'( + // This is sad, but can't change that as it would break/wipe all container db for users + if C::lib_directory_name() == "aws" { + managed_db_name_sanitizer(60, T::lib_directory_name(), &self.id) + } else { + format!("{}-{}", T::lib_directory_name(), &self.id) + } + } + + fn version(&self) -> String { + self.version.to_string() + } + + fn action(&self) -> &Action { + &self.action + } + + fn private_port(&self) -> Option { + Some(self.private_port) + } + + fn start_timeout(&self) -> Timeout { + Timeout::Default + } + + fn total_cpus(&self) -> String { + self.total_cpus.to_string() + } + + fn cpu_burst(&self) -> String { + self.total_cpus.to_string() + } + + fn total_ram_in_mib(&self) -> u32 { + self.total_ram_in_mib + } + + fn min_instances(&self) -> u32 { + 1 + } + + fn max_instances(&self) -> u32 { + 1 + } + + fn 
publicly_accessible(&self) -> bool { + self.publicly_accessible + } + + fn tera_context(&self, target: &DeploymentTarget) -> Result { + self.to_tera_context(target) + } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn selector(&self) -> Option { + Some(self.selector()) + } +} + +impl> Helm for Database { + fn helm_selector(&self) -> Option { + Some(self.selector()) + } + + fn helm_release_name(&self) -> String { + format!("{}-{}", DbType::lib_directory_name(), self.id) + } + + fn helm_chart_dir(&self) -> String { + format!( + "{}/common/services/{}", + self.context.lib_root_dir(), + DbType::lib_directory_name() + ) + } + + fn helm_chart_values_dir(&self) -> String { + format!( + "{}/{}/chart_values/{}", + self.context.lib_root_dir(), + Cloud::lib_directory_name(), + DbType::lib_directory_name() + ) + } + + fn helm_chart_external_name_service_dir(&self) -> String { + format!("{}/common/charts/external-name-svc", self.context.lib_root_dir()) + } +} + +impl> Create for Database +where + Database: ToTeraContext, +{ + #[named] + fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + + send_progress_on_long_task(self, Action::Create, || { + deploy_stateful_service(target, self, event_details.clone(), self.logger()) + }) + } + + #[named] + fn on_create_check(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + + if self.publicly_accessible { + check_domain_for( + ListenersHelper::new(&self.listeners), + vec![&self.fqdn], + self.context.execution_id(), + 
self.context.execution_id(), + event_details, + self.logger(), + )?; + } + Ok(()) + } + + #[named] + fn on_create_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + Ok(()) + } +} + +impl> Pause for Database +where + Database: ToTeraContext, +{ + #[named] + fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + send_progress_on_long_task(self, Action::Pause, || scale_down_database(target, self, 0)) + } + + fn on_pause_check(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + Ok(()) + } +} + +impl> Delete for Database +where + Database: ToTeraContext, +{ + #[named] + fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + + send_progress_on_long_task(self, Action::Delete, || { + delete_stateful_service(target, self, event_details.clone(), self.logger()) + }) + } + + fn on_delete_check(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn 
on_delete_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + C::short_name(), + T::db_type().to_string().as_str(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + + Ok(()) + } +} + +impl> StatefulService for Database +where + Database: ToTeraContext, +{ + fn as_stateful_service(&self) -> &dyn StatefulService { + self + } + + fn is_managed_service(&self) -> bool { + M::is_managed() + } +} + +impl> service::Database for Database where + Database: ToTeraContext +{ +} + +impl> Database +where + Database: Service, +{ + fn get_version(&self, event_details: EventDetails) -> Result { + let fn_version = match T::db_type() { + service::DatabaseType::PostgreSQL => get_self_hosted_postgres_version, + service::DatabaseType::MongoDB => get_self_hosted_mongodb_version, + service::DatabaseType::MySQL => get_self_hosted_mysql_version, + service::DatabaseType::Redis => get_self_hosted_redis_version, + }; + + check_service_version(fn_version(self.version.to_string()), self, event_details, self.logger()) + } + + pub(super) fn to_tera_context_for_container( + &self, + target: &DeploymentTarget, + options: &DatabaseOptions, + ) -> Result { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + let mut context = default_tera_context(self, kubernetes, environment); + + // we need the kubernetes config file to store tfstates file in kube secrets + let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; + context.insert("kubeconfig_path", &kube_config_file_path); + + kubectl::kubectl_exec_create_namespace_without_labels( + environment.namespace(), + kube_config_file_path.as_str(), + kubernetes.cloud_provider().credentials_environment_variables(), + ); + + context.insert("namespace", 
environment.namespace()); + + let version = self.get_version(event_details)?.matched_version().to_string(); + context.insert("version", &version); + + for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { + context.insert(k, v); + } + + context.insert("kubernetes_cluster_id", kubernetes.id()); + context.insert("kubernetes_cluster_name", kubernetes.name()); + + context.insert("fqdn_id", self.fqdn_id.as_str()); + context.insert("fqdn", self.fqdn(target, &self.fqdn, M::is_managed()).as_str()); + context.insert("service_name", self.fqdn_id.as_str()); + context.insert("database_db_name", self.name()); + context.insert("database_login", options.login.as_str()); + context.insert("database_password", options.password.as_str()); + context.insert("database_port", &self.private_port()); + context.insert("database_disk_size_in_gib", &options.disk_size_in_gib); + context.insert("database_instance_type", &self.database_instance_type); + context.insert("database_disk_type", &options.database_disk_type); + context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); + context.insert("database_total_cpus", &self.total_cpus); + context.insert("database_fqdn", &options.host.as_str()); + context.insert("database_id", &self.id()); + context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); + context.insert("tfstate_name", &get_tfstate_name(self)); + context.insert("publicly_accessible", &self.publicly_accessible); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} diff --git a/src/models/database_utils.rs b/src/models/database_utils.rs new file mode 100644 index 00000000..41068f02 --- /dev/null +++ b/src/models/database_utils.rs @@ -0,0 +1,199 @@ +use crate::errors::CommandError; +use crate::models::types::VersionsNumber; +use std::collections::HashMap; +use std::str::FromStr; + +pub fn 
get_self_hosted_postgres_version(requested_version: String) -> Result { + let mut supported_postgres_versions = HashMap::new(); + + // https://hub.docker.com/r/bitnami/postgresql/tags?page=1&ordering=last_updated + + // v10 + let v10 = generate_supported_version(10, 1, 16, Some(0), Some(0), None); + supported_postgres_versions.extend(v10); + + // v11 + let v11 = generate_supported_version(11, 1, 11, Some(0), Some(0), None); + supported_postgres_versions.extend(v11); + + // v12 + let v12 = generate_supported_version(12, 2, 8, Some(0), Some(0), None); + supported_postgres_versions.extend(v12); + + // v13 + let v13 = generate_supported_version(13, 1, 4, Some(0), Some(0), None); + supported_postgres_versions.extend(v13); + + get_supported_version_to_use("Postgresql", supported_postgres_versions, requested_version) +} + +pub fn get_self_hosted_mysql_version(requested_version: String) -> Result { + let mut supported_mysql_versions = HashMap::new(); + // https://hub.docker.com/r/bitnami/mysql/tags?page=1&ordering=last_updated + + // v5.7 + let v57 = generate_supported_version(5, 7, 7, Some(16), Some(34), None); + supported_mysql_versions.extend(v57); + + // v8 + let v8 = generate_supported_version(8, 0, 0, Some(11), Some(24), None); + supported_mysql_versions.extend(v8); + + get_supported_version_to_use("MySQL", supported_mysql_versions, requested_version) +} + +pub fn get_self_hosted_mongodb_version(requested_version: String) -> Result { + let mut supported_mongodb_versions = HashMap::new(); + + // https://hub.docker.com/r/bitnami/mongodb/tags?page=1&ordering=last_updated + + // v3.6 + let mongo_version = generate_supported_version(3, 6, 6, Some(0), Some(22), None); + supported_mongodb_versions.extend(mongo_version); + + // v4.0 + let mongo_version = generate_supported_version(4, 0, 0, Some(0), Some(23), None); + supported_mongodb_versions.extend(mongo_version); + + // v4.2 + let mongo_version = generate_supported_version(4, 2, 2, Some(0), Some(12), None); + 
supported_mongodb_versions.extend(mongo_version); + + // v4.4 + let mongo_version = generate_supported_version(4, 4, 4, Some(0), Some(4), None); + supported_mongodb_versions.extend(mongo_version); + + get_supported_version_to_use("MongoDB", supported_mongodb_versions, requested_version) +} + +pub fn get_self_hosted_redis_version(requested_version: String) -> Result { + let mut supported_redis_versions = HashMap::with_capacity(4); + // https://hub.docker.com/r/bitnami/redis/tags?page=1&ordering=last_updated + + supported_redis_versions.insert("6".to_string(), "6.0.9".to_string()); + supported_redis_versions.insert("6.0".to_string(), "6.0.9".to_string()); + supported_redis_versions.insert("5".to_string(), "5.0.10".to_string()); + supported_redis_versions.insert("5.0".to_string(), "5.0.10".to_string()); + + get_supported_version_to_use("Redis", supported_redis_versions, requested_version) +} + +pub fn get_supported_version_to_use( + database_name: &str, + all_supported_versions: HashMap, + version_to_check: String, +) -> Result { + let version = VersionsNumber::from_str(version_to_check.as_str())?; + + // if a patch version is required + if version.patch.is_some() { + return match all_supported_versions.get(&format!( + "{}.{}.{}", + version.major, + version.minor.unwrap(), + version.patch.unwrap() + )) { + Some(version) => Ok(version.to_string()), + None => { + return Err(CommandError::new_from_safe_message(format!( + "{} {} version is not supported", + database_name, version_to_check + ))); + } + }; + } + + // if a minor version is required + if version.minor.is_some() { + return match all_supported_versions.get(&format!("{}.{}", version.major, version.minor.unwrap())) { + Some(version) => Ok(version.to_string()), + None => { + return Err(CommandError::new_from_safe_message(format!( + "{} {} version is not supported", + database_name, version_to_check + ))); + } + }; + }; + + // if only a major version is required + match all_supported_versions.get(&version.major) { 
+ Some(version) => Ok(version.to_string()), + None => { + return Err(CommandError::new_from_safe_message(format!( + "{} {} version is not supported", + database_name, version_to_check + ))); + } + } +} + +// Ease the support of multiple versions by range +pub fn generate_supported_version( + major: i32, + minor_min: i32, + minor_max: i32, + update_min: Option, + update_max: Option, + suffix_version: Option, +) -> HashMap { + let mut supported_versions = HashMap::new(); + let latest_major_version; + + // blank suffix if not requested + let suffix = match suffix_version { + Some(suffix) => suffix, + None => "".to_string(), + }; + + let _ = match update_min { + // manage minor with updates + Some(_) => { + latest_major_version = format!("{}.{}.{}{}", major, minor_max, update_max.unwrap(), suffix); + + if minor_min == minor_max { + // add short minor format targeting latest version + supported_versions.insert(format!("{}.{}", major, minor_max), latest_major_version.clone()); + if update_min.unwrap() == update_max.unwrap() { + let version = format!("{}.{}.{}", major, minor_min, update_min.unwrap()); + supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); + } else { + for update in update_min.unwrap()..update_max.unwrap() + 1 { + let version = format!("{}.{}.{}", major, minor_min, update); + supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); + } + } + } else { + for minor in minor_min..minor_max + 1 { + // add short minor format targeting latest version + supported_versions.insert( + format!("{}.{}", major, minor), + format!("{}.{}.{}", major, minor, update_max.unwrap()), + ); + if update_min.unwrap() == update_max.unwrap() { + let version = format!("{}.{}.{}", major, minor, update_min.unwrap()); + supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); + } else { + for update in update_min.unwrap()..update_max.unwrap() + 1 { + let version = format!("{}.{}.{}", major, minor, update); + 
supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); + } + } + } + } + } + // manage minor without updates + None => { + latest_major_version = format!("{}.{}{}", major, minor_max, suffix); + for minor in minor_min..minor_max + 1 { + let version = format!("{}.{}", major, minor); + supported_versions.insert(version.clone(), format!("{}{}", version, suffix)); + } + } + }; + + // default major + major.minor supported version + supported_versions.insert(major.to_string(), latest_major_version); + + supported_versions +} diff --git a/src/models/digital_ocean/database.rs b/src/models/digital_ocean/database.rs new file mode 100644 index 00000000..5d08be95 --- /dev/null +++ b/src/models/digital_ocean/database.rs @@ -0,0 +1,152 @@ +use crate::cloud_provider::service::{check_service_version, DatabaseOptions, Service}; +use crate::cloud_provider::{service, DeploymentTarget}; +use crate::errors::EngineError; +use crate::models::database::{Container, Database, DatabaseType, MongoDB, MySQL, PostgresSQL, Redis}; +use crate::models::database_utils::{ + get_self_hosted_mongodb_version, get_self_hosted_mysql_version, get_self_hosted_postgres_version, + get_self_hosted_redis_version, +}; +use crate::models::types::{ToTeraContext, DO}; +use tera::Context as TeraContext; + +///////////////////////////////////////////////////////////////// +// CONTAINER +impl DatabaseType for PostgresSQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "PostgresSQL" + } + fn lib_directory_name() -> &'static str { + "postgresql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::PostgreSQL + } +} + +impl DatabaseType for MySQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MySQL" + } + fn lib_directory_name() -> &'static str { + "mysql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::MySQL + } +} + +impl DatabaseType for Redis { + type DatabaseOptions = 
DatabaseOptions; + + fn short_name() -> &'static str { + "Redis" + } + fn lib_directory_name() -> &'static str { + "redis" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::Redis + } +} + +impl DatabaseType for MongoDB { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MongoDB" + } + + fn lib_directory_name() -> &'static str { + "mongodb" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MongoDB + } +} + +///////////////////////////////////////////////////////////////// +// MANAGED +// DO don't support managed databases for now + +////////////////////////////////////////////////////////////////////////: +// POSTGRES SQL +impl ToTeraContext for Database +where + PostgresSQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_postgres_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MySQL +impl ToTeraContext for Database +where + MySQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_mysql_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MongoDB +impl ToTeraContext for Database +where + MongoDB: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_mongodb_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + + 
self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// Redis +impl ToTeraContext for Database +where + Redis: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_redis_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} diff --git a/src/models/digital_ocean/mod.rs b/src/models/digital_ocean/mod.rs index e2a3d062..fbe03807 100644 --- a/src/models/digital_ocean/mod.rs +++ b/src/models/digital_ocean/mod.rs @@ -1,4 +1,5 @@ mod application; +mod database; mod router; use crate::errors::CommandError; @@ -34,7 +35,7 @@ impl CloudProvider for DO { "Digital Ocean Container Registry" } - fn helm_directory_name() -> &'static str { + fn lib_directory_name() -> &'static str { "digitalocean" } } diff --git a/src/models/mod.rs b/src/models/mod.rs index f0b21d4f..ac27daa2 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -1,5 +1,7 @@ pub mod application; pub mod aws; +pub mod database; +pub(crate) mod database_utils; pub mod digital_ocean; pub mod router; pub mod scaleway; diff --git a/src/models/router.rs b/src/models/router.rs index 8514d8a2..1fc3a9b7 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -218,7 +218,7 @@ impl Helm for Router { format!( "{}/{}/chart_values/nginx-ingress", self.context.lib_root_dir(), - T::helm_directory_name() + T::lib_directory_name() ) } @@ -333,7 +333,7 @@ where let from_dir = format!( "{}/{}/charts/q-ingress-tls", self.context.lib_root_dir(), - T::helm_directory_name() + T::lib_directory_name() ); if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context) diff --git a/src/models/scaleway/database.rs b/src/models/scaleway/database.rs new file mode 100644 index 
00000000..2649942a --- /dev/null +++ b/src/models/scaleway/database.rs @@ -0,0 +1,293 @@ +use crate::cloud_provider::service::{ + check_service_version, default_tera_context, get_tfstate_name, get_tfstate_suffix, DatabaseOptions, Service, + ServiceVersionCheckResult, +}; +use crate::cloud_provider::{service, DeploymentTarget}; +use crate::cmd::kubectl; +use crate::errors::EngineError; +use crate::events::{EnvironmentStep, EventDetails, Stage}; +use crate::models::database::{ + Container, Database, DatabaseMode, DatabaseType, Managed, MongoDB, MySQL, PostgresSQL, Redis, +}; +use crate::models::database_utils::{ + get_self_hosted_mongodb_version, get_self_hosted_mysql_version, get_self_hosted_postgres_version, + get_self_hosted_redis_version, +}; +use crate::models::scaleway::database_utils::{pick_managed_mysql_version, pick_managed_postgres_version}; +use crate::models::types::{ToTeraContext, SCW}; +use tera::Context as TeraContext; + +///////////////////////////////////////////////////////////////// +// CONTAINER +impl DatabaseType for PostgresSQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "PostgresSQL" + } + fn lib_directory_name() -> &'static str { + "postgresql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::PostgreSQL + } +} + +impl DatabaseType for MySQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MySQL" + } + fn lib_directory_name() -> &'static str { + "mysql" + } + fn db_type() -> service::DatabaseType { + service::DatabaseType::MySQL + } +} + +impl DatabaseType for Redis { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "Redis" + } + fn lib_directory_name() -> &'static str { + "redis" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::Redis + } +} + +impl DatabaseType for MongoDB { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MongoDB" + } + + fn 
lib_directory_name() -> &'static str { + "mongodb" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MongoDB + } +} + +///////////////////////////////////////////////////////////////// +// MANAGED +impl DatabaseType for PostgresSQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "Postgres Managed" + } + fn lib_directory_name() -> &'static str { + "postgresql" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::PostgreSQL + } +} + +impl DatabaseType for MySQL { + type DatabaseOptions = DatabaseOptions; + + fn short_name() -> &'static str { + "MySQL Managed" + } + fn lib_directory_name() -> &'static str { + "mysql" + } + + fn db_type() -> service::DatabaseType { + service::DatabaseType::MySQL + } +} + +// Redis and MongoDB are not supported managed db yet + +impl> Database { + fn to_tera_context_for_scaleway_managed( + &self, + target: &DeploymentTarget, + options: &DatabaseOptions, + get_version: &dyn Fn(EventDetails) -> Result, + ) -> Result + where + Database: Service, + { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); + let kubernetes = target.kubernetes; + let environment = target.environment; + + let mut context = default_tera_context(self, kubernetes, environment); + + // we need the kubernetes config file to store tfstates file in kube secrets + let kube_config_file_path = kubernetes.get_kubeconfig_file_path()?; + context.insert("kubeconfig_path", &kube_config_file_path); + + kubectl::kubectl_exec_create_namespace_without_labels( + environment.namespace(), + kube_config_file_path.as_str(), + kubernetes.cloud_provider().credentials_environment_variables(), + ); + + context.insert("namespace", environment.namespace()); + + let version = get_version(event_details)?.matched_version(); + context.insert("version_major", &version.to_major_version_string()); + context.insert("version", &version.to_string()); // Scaleway needs to have 
major version only + + for (k, v) in kubernetes.cloud_provider().tera_context_environment_variables() { + context.insert(k, v); + } + + context.insert("kubernetes_cluster_id", kubernetes.id()); + context.insert("kubernetes_cluster_name", kubernetes.name()); + + context.insert("fqdn_id", self.fqdn_id.as_str()); + context.insert("fqdn", self.fqdn(target, &self.fqdn, M::is_managed()).as_str()); + context.insert("service_name", self.fqdn_id.as_str()); + context.insert("database_name", self.sanitized_name().as_str()); + context.insert("database_db_name", self.name()); + context.insert("database_login", options.login.as_str()); + context.insert("database_password", options.password.as_str()); + context.insert("database_port", &self.private_port()); + context.insert("database_disk_size_in_gib", &options.disk_size_in_gib); + context.insert("database_instance_type", &self.database_instance_type); + context.insert("database_disk_type", &options.database_disk_type); + context.insert("database_ram_size_in_mib", &self.total_ram_in_mib); + context.insert("database_total_cpus", &self.total_cpus); + context.insert("database_fqdn", &options.host.as_str()); + context.insert("database_id", &self.id()); + context.insert("tfstate_suffix_name", &get_tfstate_suffix(self)); + context.insert("tfstate_name", &get_tfstate_name(self)); + + context.insert("publicly_accessible", &options.publicly_accessible); + context.insert("activate_high_availability", &options.activate_high_availability); + context.insert("activate_backups", &options.activate_backups); + context.insert("delete_automated_backups", &self.context().is_test_cluster()); + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + Ok(context) + } +} + +////////////////////////////////////////////////////////////////////////: +// POSTGRES SQL +impl ToTeraContext for Database +where + PostgresSQL: DatabaseType, +{ + fn 
to_tera_context(&self, target: &DeploymentTarget) -> Result { + let check_version = |event_details| { + check_service_version( + pick_managed_postgres_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_scaleway_managed(target, &self.options, &check_version) + } +} + +impl ToTeraContext for Database +where + PostgresSQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_postgres_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MySQL +impl ToTeraContext for Database +where + MySQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let check_version = |event_details| { + check_service_version( + pick_managed_mysql_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_scaleway_managed(target, &self.options, &check_version) + } +} + +impl ToTeraContext for Database +where + MySQL: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_mysql_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// MongoDB +impl ToTeraContext for Database +where + MongoDB: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_mongodb_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + + 
self.to_tera_context_for_container(target, &self.options) + } +} + +////////////////////////////////////////////////////////////////////////: +// Redis +impl ToTeraContext for Database +where + Redis: DatabaseType, +{ + fn to_tera_context(&self, target: &DeploymentTarget) -> Result { + let _check_version = |event_details| { + check_service_version( + get_self_hosted_redis_version(self.version.to_string()), + self, + event_details, + self.logger(), + ) + }; + self.to_tera_context_for_container(target, &self.options) + } +} diff --git a/src/models/scaleway/database_utils.rs b/src/models/scaleway/database_utils.rs new file mode 100644 index 00000000..31fb45cd --- /dev/null +++ b/src/models/scaleway/database_utils.rs @@ -0,0 +1,36 @@ +use crate::errors::CommandError; +use crate::models::database_utils::get_supported_version_to_use; +use std::collections::HashMap; + +pub(super) fn pick_managed_postgres_version(requested_version: String) -> Result { + // Scaleway supported postgres versions + // https://api.scaleway.com/rdb/v1/regions/fr-par/database-engines + let mut supported_postgres_versions = HashMap::new(); + + // {"name":"PostgreSQL","version":"13","end_of_life":"2025-11-13T00:00:00Z"} + // {"name":"PostgreSQL","version":"12","end_of_life":"2024-11-14T00:00:00Z"} + // {"name":"PostgreSQL","version":"11","end_of_life":"2023-11-09T00:00:00Z"} + // {"name":"PostgreSQL","version":"10","end_of_life":"2022-11-10T00:00:00Z"} + supported_postgres_versions.insert("10".to_string(), "10".to_string()); + supported_postgres_versions.insert("10.0".to_string(), "10.0".to_string()); + supported_postgres_versions.insert("11".to_string(), "11".to_string()); + supported_postgres_versions.insert("11.0".to_string(), "11.0".to_string()); + supported_postgres_versions.insert("12".to_string(), "12".to_string()); + supported_postgres_versions.insert("12.0".to_string(), "12.0".to_string()); + supported_postgres_versions.insert("13".to_string(), "13".to_string()); + 
supported_postgres_versions.insert("13.0".to_string(), "13.0".to_string()); + + get_supported_version_to_use("RDB postgres", supported_postgres_versions, requested_version) +} + +pub(super) fn pick_managed_mysql_version(requested_version: String) -> Result { + // Scaleway supported MySQL versions + // https://api.scaleway.com/rdb/v1/regions/fr-par/database-engines + let mut supported_mysql_versions = HashMap::new(); + + // {"name": "MySQL", "version":"8","end_of_life":"2026-04-01T00:00:00Z"} + supported_mysql_versions.insert("8".to_string(), "8".to_string()); + supported_mysql_versions.insert("8.0".to_string(), "8.0".to_string()); + + get_supported_version_to_use("RDB MySQL", supported_mysql_versions, requested_version) +} diff --git a/src/models/scaleway/mod.rs b/src/models/scaleway/mod.rs index b9067fc7..9d4a7edc 100644 --- a/src/models/scaleway/mod.rs +++ b/src/models/scaleway/mod.rs @@ -1,4 +1,6 @@ mod application; +mod database; +mod database_utils; mod router; use crate::errors::CommandError; @@ -33,7 +35,7 @@ impl CloudProvider for SCW { "Scaleway Container Registry" } - fn helm_directory_name() -> &'static str { + fn lib_directory_name() -> &'static str { "scaleway" } } diff --git a/src/models/types.rs b/src/models/types.rs index 59cb6b23..d63bbeb1 100644 --- a/src/models/types.rs +++ b/src/models/types.rs @@ -1,5 +1,10 @@ +use serde_derive::{Deserialize, Serialize}; +use std::fmt; +use std::fmt::Write; +use std::str::FromStr; + use crate::cloud_provider::DeploymentTarget; -use crate::errors::EngineError; +use crate::errors::{CommandError, EngineError}; use tera::Context as TeraContext; // Those types are just marker types that are use to tag our struct/object model @@ -19,9 +24,106 @@ pub trait CloudProvider { fn full_name() -> &'static str; fn registry_short_name() -> &'static str; fn registry_full_name() -> &'static str; - fn helm_directory_name() -> &'static str; + fn lib_directory_name() -> &'static str; } pub(crate) trait ToTeraContext { fn 
to_tera_context(&self, target: &DeploymentTarget) -> Result; } + +// unfortunately some proposed versions are not SemVer like Elasticache (6.x) +// this is why we need to have our own structure +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +pub struct VersionsNumber { + pub(crate) major: String, + pub(crate) minor: Option, + pub(crate) patch: Option, + pub(crate) suffix: Option, +} + +impl VersionsNumber { + pub fn new(major: String, minor: Option, patch: Option, suffix: Option) -> Self { + VersionsNumber { + major, + minor, + patch, + suffix, + } + } + + pub fn to_major_version_string(&self) -> String { + self.major.clone() + } + + pub fn to_major_minor_version_string(&self, default_minor: &str) -> String { + let test = format!( + "{}.{}", + self.major.clone(), + self.minor.as_ref().unwrap_or(&default_minor.to_string()) + ); + + test + } +} + +impl FromStr for VersionsNumber { + type Err = CommandError; + + fn from_str(version: &str) -> Result { + if version.trim() == "" { + return Err(CommandError::new_from_safe_message("version cannot be empty".to_string())); + } + + let mut version_split = version.splitn(4, '.').map(|v| v.trim()); + + let major = match version_split.next() { + Some(major) => { + let major = major.to_string(); + major.replace('v', "") + } + None => { + return Err(CommandError::new_from_safe_message(format!( + "please check the version you've sent ({}), it can't be checked", + version + ))) + } + }; + + let minor = version_split.next().map(|minor| { + let minor = minor.to_string(); + minor.replace('+', "") + }); + + let patch = version_split.next().map(|patch| patch.to_string()); + + let suffix = version_split.next().map(|suffix| suffix.to_string()); + + // TODO(benjaminch): Handle properly the case where versions are empty + // eq. 
1..2 + + Ok(VersionsNumber::new(major, minor, patch, suffix)) + } +} + +impl fmt::Display for VersionsNumber { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.major)?; + + if let Some(minor) = &self.minor { + f.write_char('.')?; + f.write_str(minor)?; + } + + if let Some(patch) = &self.patch { + f.write_char('.')?; + f.write_str(patch)?; + } + + if let Some(suffix) = &self.suffix { + f.write_char('.')?; + f.write_str(suffix)?; + } + + Ok(()) + } +} diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index e201076c..72c3450a 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -323,7 +323,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let database_name = format!("postgresql{}-0", &environment_check.databases[0].name); + let database_name = format!("postgresql{}-0", &environment_check.databases[0].id); match is_pod_restarted_env(context, Kind::Aws, environment_check, database_name.as_str(), secrets) { (true, _) => assert!(true), (false, _) => assert!(false), diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 128e8917..8de7ff7e 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -360,7 +360,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(result, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let database_name = format!("postgresql-{}-0", &environment_check.databases[0].name); + let database_name = format!("postgresql-{}-0", &environment_check.databases[0].id); match is_pod_restarted_env( context.clone(), ProviderKind::Scw, From 5522c74ff96d0c52597ce0cdab4dae1801037f5b Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Wed, 30 Mar 2022 23:10:13 +0200 Subject: [PATCH 011/122] Fix tests --- tests/aws/aws_environment.rs | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index d45cc26f..ac3b3dc0 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -220,7 +220,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { None, ); for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") { - assert_eq!(pdb.metadata.name.contains(&environment.applications[0].name), false) + assert_eq!(pdb.metadata.name.contains(&environment.applications[0].id), false) } // Check we can resume the env @@ -265,7 +265,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { ); let mut filtered_pdb = false; for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") { - if pdb.metadata.name.contains(&environment.applications[0].name) { + if pdb.metadata.name.contains(&environment.applications[0].id) { filtered_pdb = true; break; } From be14e6ae63eef668c0ebbe567c354888eead4514 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 31 Mar 2022 00:01:35 +0200 Subject: [PATCH 012/122] Cleanup --- src/cloud_provider/service.rs | 2 +- src/io_models.rs | 189 +++++++++++++++++----------------- src/models/database.rs | 5 +- 3 files changed, 101 insertions(+), 95 deletions(-) diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index eed91974..642bcb83 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -250,7 +250,7 @@ pub struct DatabaseOptions { pub publicly_accessible: bool, } -#[derive(Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq)] pub enum DatabaseType { PostgreSQL, MongoDB, diff --git a/src/io_models.rs b/src/io_models.rs index 9e729919..92c1eb11 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -16,19 +16,19 @@ use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::service::{DatabaseOptions, 
RouterService}; -use crate::cloud_provider::CloudProvider; use crate::cloud_provider::Kind as CPKind; +use crate::cloud_provider::{service, CloudProvider}; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; use crate::models; use crate::models::application::{ApplicationError, ApplicationService}; use crate::models::aws::{AwsAppExtraSettings, AwsRouterExtraSettings, AwsStorageType}; -use crate::models::database::{Container, Managed, MongoDB, MySQL, PostgresSQL, Redis}; +use crate::models::database::{Container, DatabaseError, Managed, MongoDB, MySQL, PostgresSQL, Redis}; use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, DoStorageType}; use crate::models::router::RouterError; use crate::models::scaleway::{ScwAppExtraSettings, ScwRouterExtraSettings, ScwStorageType}; -use crate::models::types::{VersionsNumber, AWS, DO, SCW}; +use crate::models::types::{CloudProvider as CP, VersionsNumber, AWS, DO, SCW}; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -120,11 +120,16 @@ impl EnvironmentRequest { } } - let databases = self - .databases - .iter() - .filter_map(|x| x.to_database_domain(context, cloud_provider, logger.clone())) - .collect::>(); + let mut databases = Vec::with_capacity(self.databases.len()); + for db in &self.databases { + match db.to_database_domain(context, cloud_provider, logger.clone()) { + Ok(router) => databases.push(router), + Err(err) => { + //FIXME: propagate the correct Error + return Err(ApplicationError::InvalidConfig(format!("{}", err))); + } + } + } Ok(Environment::new( self.id.as_str(), @@ -621,7 +626,7 @@ impl Database { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Option> { + ) -> Result, DatabaseError> { let database_options = DatabaseOptions { mode: self.mode.clone(), login: self.username.clone(), @@ -637,6 +642,8 @@ impl Database { }; let listeners = cloud_provider.listeners().clone(); + let version = 
VersionsNumber::from_str(self.version.as_str()) + .map_err(|_| DatabaseError::InvalidConfig(format!("Bad version number: {}", self.version)))?; match (cloud_provider.kind(), &self.kind, &self.mode) { (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { @@ -645,7 +652,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -656,10 +663,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -667,7 +673,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -678,10 +684,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::MANAGED) => { @@ -690,7 +695,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -701,10 +706,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -712,7 +716,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -723,10 +727,9 @@ impl Database { database_options, 
listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( @@ -734,7 +737,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -745,10 +748,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -756,7 +758,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -767,10 +769,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( @@ -778,7 +779,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -789,10 +790,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -800,7 +800,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -811,20 +811,18 @@ impl Database { database_options, listeners, logger, - ) - 
.unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } - (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => None, (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -835,19 +833,17 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } - (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::MANAGED) => None, (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -858,19 +854,17 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } - (CPKind::Do, DatabaseKind::Redis, DatabaseMode::MANAGED) => None, (CPKind::Do, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -881,19 +875,17 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } - (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => None, (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - 
VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -904,11 +896,25 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } + (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => Err( + DatabaseError::UnsupportedManagedMode(service::DatabaseType::PostgreSQL, DO::full_name().to_string()), + ), + (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::MANAGED) => Err(DatabaseError::UnsupportedManagedMode( + service::DatabaseType::MySQL, + DO::full_name().to_string(), + )), + (CPKind::Do, DatabaseKind::Redis, DatabaseMode::MANAGED) => Err(DatabaseError::UnsupportedManagedMode( + service::DatabaseType::Redis, + DO::full_name().to_string(), + )), + (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => Err(DatabaseError::UnsupportedManagedMode( + service::DatabaseType::MongoDB, + DO::full_name().to_string(), + )), (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( @@ -916,7 +922,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -927,9 +933,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); - Some(Box::new(db)) + )?; + + Ok(Box::new(db)) } (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -937,7 +943,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -948,9 +954,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); - Some(Box::new(db)) + )?; + + Ok(Box::new(db)) } (CPKind::Scw, 
DatabaseKind::Mysql, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( @@ -958,7 +964,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -969,9 +975,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); - Some(Box::new(db)) + )?; + + Ok(Box::new(db)) } (CPKind::Scw, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -979,7 +985,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -990,14 +996,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) - } - (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::MANAGED) => { - // Not Implemented - None + Ok(Box::new(db)) } (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -1005,7 +1006,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), self.total_cpus.clone(), @@ -1016,14 +1017,9 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) - } - (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { - // Not Implemented - None + Ok(Box::new(db)) } (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( @@ -1031,7 +1027,7 @@ impl Database { self.id.as_str(), self.action.to_service_action(), self.name.as_str(), - VersionsNumber::from_str(self.version.as_str()).ok()?, + version, self.fqdn.as_str(), self.fqdn_id.as_str(), 
self.total_cpus.clone(), @@ -1042,11 +1038,18 @@ impl Database { database_options, listeners, logger, - ) - .unwrap(); + )?; - Some(Box::new(db)) + Ok(Box::new(db)) } + (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::MANAGED) => Err(DatabaseError::UnsupportedManagedMode( + service::DatabaseType::Redis, + SCW::full_name().to_string(), + )), + (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => Err(DatabaseError::UnsupportedManagedMode( + service::DatabaseType::MongoDB, + SCW::full_name().to_string(), + )), } } } diff --git a/src/models/database.rs b/src/models/database.rs index 7b5c7052..679dea05 100644 --- a/src/models/database.rs +++ b/src/models/database.rs @@ -61,8 +61,11 @@ pub trait DatabaseType { #[derive(thiserror::Error, Debug)] pub enum DatabaseError { - #[error("Application invalid configuration: {0}")] + #[error("Database invalid configuration: {0}")] InvalidConfig(String), + + #[error("Managed database for {0:?} is not supported (yet) by provider {1}")] + UnsupportedManagedMode(service::DatabaseType, String), } pub struct Database> { From d30bbab2a2ded465b542e9bdf8247ebc59865859 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Thu, 31 Mar 2022 00:06:54 +0200 Subject: [PATCH 013/122] fix: avoid leaking env vars to listeners (#677) Ticket: ENG-1156 --- src/cloud_provider/service.rs | 6 +- src/errors/mod.rs | 114 +++++++++++++++++++++++++++++++++- src/events/mod.rs | 34 ++++++++++ 3 files changed, 151 insertions(+), 3 deletions(-) diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 642bcb83..dc09100c 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -1004,11 +1004,13 @@ where service.progress_scope(), ProgressLevel::Error, Some(format!( - "{} error {} {} : error => {:?}", + "{} error {} {} : error => {}", action_verb, service.service_type().name().to_lowercase(), service.name(), - err + // Note: env vars are not leaked to legacy listeners since it can holds sensitive data + // such as 
secrets and such. + err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) )), kubernetes.context().execution_id(), ); diff --git a/src/errors/mod.rs b/src/errors/mod.rs index a5708a5b..79103ccb 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -2863,6 +2863,118 @@ impl EngineError { impl Display for EngineError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str(format!("{:?}", self).as_str()) + // Note: just in case, env vars are not leaked since it can hold sensitive data such as secrets. + f.write_str(self.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars).as_str()) + } +} + +#[cfg(test)] +mod tests { + use crate::cloud_provider::Kind; + use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; + use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter}; + use crate::io_models::QoveryIdentifier; + use crate::models::scaleway::ScwRegion; + + #[test] + fn test_command_error_test_hidding_env_vars_in_message_safe_only() { + // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = command_err.message(ErrorMessageVerbosity::SafeOnly); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } + + #[test] + fn test_command_error_test_hidding_env_vars_in_message_full_without_env_vars() { + // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = command_err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } + + #[test] + fn test_engine_error_test_hidding_env_vars_in_message_safe_only() { 
+ // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + let cluster_id = QoveryIdentifier::new_random(); + let engine_err = EngineError::new_unknown( + EventDetails::new( + Some(Kind::Scw), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + Some(ScwRegion::Paris.as_str().to_string()), + Stage::Infrastructure(InfrastructureStep::Create), + Transmitter::Kubernetes(cluster_id.to_string(), cluster_id.to_string()), + ), + "qovery_log_message".to_string(), + "user_log_message".to_string(), + Some(command_err.clone()), + None, + None, + ); + + // execute: + let res = engine_err.message(ErrorMessageVerbosity::SafeOnly); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } + + #[test] + fn test_engine_error_test_hidding_env_vars_in_message_full_without_env_vars() { + // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + let cluster_id = QoveryIdentifier::new_random(); + let engine_err = EngineError::new_unknown( + EventDetails::new( + Some(Kind::Scw), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + Some(ScwRegion::Paris.as_str().to_string()), + Stage::Infrastructure(InfrastructureStep::Create), + Transmitter::Kubernetes(cluster_id.to_string(), cluster_id.to_string()), + ), + "qovery_log_message".to_string(), + "user_log_message".to_string(), + Some(command_err.clone()), + None, + None, + ); + + // execute: + let res = engine_err.message(ErrorMessageVerbosity::SafeOnly); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); } } diff --git a/src/events/mod.rs 
b/src/events/mod.rs index f1e472a9..7fe246f0 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -578,4 +578,38 @@ mod tests { assert_eq!(expected_step_name, result); } } + + #[test] + fn test_event_message_test_hidding_env_vars_in_message_safe_only() { + // setup: + let event_message = EventMessage::new_with_env_vars( + "my safe message".to_string(), + Some("my full message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = event_message.message(EventMessageVerbosity::SafeOnly); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } + + #[test] + fn test_event_message_test_hidding_env_vars_in_message_full_without_env_vars() { + // setup: + let event_message = EventMessage::new_with_env_vars( + "my safe message".to_string(), + Some("my full message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = event_message.message(EventMessageVerbosity::FullDetailsWithoutEnvVars); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } } From fd1889ce5eb296470e8af205a80195faa78277ba Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Thu, 31 Mar 2022 00:12:20 +0200 Subject: [PATCH 014/122] Rename Database trait to DatabaseService --- src/cloud_provider/environment.rs | 6 +++--- src/cloud_provider/service.rs | 4 ++-- src/io_models.rs | 2 +- src/models/database.rs | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/cloud_provider/environment.rs b/src/cloud_provider/environment.rs index f732aeca..c32cb888 100644 --- a/src/cloud_provider/environment.rs +++ b/src/cloud_provider/environment.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::service::{Action, Database, RouterService, StatefulService, StatelessService}; +use crate::cloud_provider::service::{Action, DatabaseService, RouterService, StatefulService, 
StatelessService}; use crate::models::application::ApplicationService; pub struct Environment { @@ -10,7 +10,7 @@ pub struct Environment { pub action: Action, pub applications: Vec>, pub routers: Vec>, - pub databases: Vec>, + pub databases: Vec>, } impl Environment { @@ -22,7 +22,7 @@ impl Environment { action: Action, applications: Vec>, routers: Vec>, - databases: Vec>, + databases: Vec>, ) -> Self { Environment { namespace: format!("{}-{}", project_id, id), diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index dc09100c..5e00ec9d 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -174,7 +174,7 @@ pub trait RouterService: StatelessService + Listen + Helm { } } -pub trait Database: StatefulService { +pub trait DatabaseService: StatefulService { fn check_domains( &self, listeners: Listeners, @@ -462,7 +462,7 @@ where pub fn scale_down_database( target: &DeploymentTarget, - service: &impl Database, + service: &impl DatabaseService, replicas_count: usize, ) -> Result<(), EngineError> { if service.is_managed_service() { diff --git a/src/io_models.rs b/src/io_models.rs index 92c1eb11..7aedf487 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -626,7 +626,7 @@ impl Database { context: &Context, cloud_provider: &dyn CloudProvider, logger: Box, - ) -> Result, DatabaseError> { + ) -> Result, DatabaseError> { let database_options = DatabaseOptions { mode: self.mode.clone(), login: self.username.clone(), diff --git a/src/models/database.rs b/src/models/database.rs index 679dea05..7d8db645 100644 --- a/src/models/database.rs +++ b/src/models/database.rs @@ -1,7 +1,7 @@ use crate::cloud_provider::service::{ check_service_version, default_tera_context, delete_stateful_service, deploy_stateful_service, get_tfstate_name, - get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, DatabaseOptions, Delete, Helm, - Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, 
Terraform, + get_tfstate_suffix, scale_down_database, send_progress_on_long_task, Action, Create, DatabaseOptions, + DatabaseService, Delete, Helm, Pause, Service, ServiceType, ServiceVersionCheckResult, StatefulService, Terraform, }; use crate::cloud_provider::utilities::{check_domain_for, managed_db_name_sanitizer, print_action}; use crate::cloud_provider::{service, DeploymentTarget}; @@ -432,7 +432,7 @@ where } } -impl> service::Database for Database where +impl> DatabaseService for Database where Database: ToTeraContext { } From 8ea79a0efc81285865ed8a17e3aecec716ca059b Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Thu, 31 Mar 2022 18:04:17 +0200 Subject: [PATCH 015/122] fix: avoid leaking details on dbg print (#678) Ticket: ENG-1156 --- Cargo.lock | 12 ++++++++ Cargo.toml | 1 + src/cloud_provider/service.rs | 4 +-- src/errors/mod.rs | 58 ++++++++++++++++++++++++++++++++++- src/events/mod.rs | 24 ++++++++++++++- 5 files changed, 95 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5e49ce1..e9711f16 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -493,6 +493,17 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2 1.0.28", + "quote 1.0.9", + "syn 1.0.74", +] + [[package]] name = "deunicode" version = "0.4.3" @@ -2076,6 +2087,7 @@ dependencies = [ "base64 0.13.0", "chrono", "cmd_lib", + "derivative", "digitalocean", "dirs", "flate2", diff --git a/Cargo.toml b/Cargo.toml index 7f5b924b..5ae0c0e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" [dependencies] chrono = "0.4.19" cmd_lib = "1.0.13" +derivative = "2.2.0" git2 = "0.14.2" walkdir = "2.3.2" itertools = "0.10.0" diff --git 
a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 5e00ec9d..29c45d75 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -1004,13 +1004,13 @@ where service.progress_scope(), ProgressLevel::Error, Some(format!( - "{} error {} {} : error => {}", + "{} error {} {} : error => {:?}", action_verb, service.service_type().name().to_lowercase(), service.name(), // Note: env vars are not leaked to legacy listeners since it can holds sensitive data // such as secrets and such. - err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) + err )), kubernetes.context().execution_id(), ); diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 79103ccb..91f9ff79 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -1,5 +1,6 @@ pub mod io; +extern crate derivative; extern crate url; use crate::build_platform::BuildError; @@ -12,6 +13,7 @@ use crate::events::{EventDetails, GeneralStep, Stage, Transmitter}; use crate::io_models::QoveryIdentifier; use crate::models::types::VersionsNumber; use crate::object_storage::errors::ObjectStorageError; +use derivative::Derivative; use std::fmt::{Display, Formatter}; use thiserror::Error; use url::Url; @@ -24,13 +26,16 @@ pub enum ErrorMessageVerbosity { } /// CommandError: command error, mostly returned by third party tools. -#[derive(Clone, Debug, Error, PartialEq)] +#[derive(Derivative, Clone, Error, PartialEq)] +#[derivative(Debug)] pub struct CommandError { /// full_details: full error message, can contains unsafe text such as passwords and tokens. full_details: String, /// message_safe: error message omitting displaying any protected data such as passwords and tokens. message_safe: Option, /// env_vars: environments variables including touchy data such as secret keys. + /// env_vars field is ignored from any wild Debug printing because of it touchy data it carries. 
+ #[derivative(Debug = "ignore")] env_vars: Option>, } @@ -2977,4 +2982,55 @@ mod tests { assert!(!res.contains("my_secret")); assert!(!res.contains("my_secret_value")); } + + #[test] + fn test_command_error_test_hidding_env_vars_in_debug() { + // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = format!("{:?}", command_err); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } + + #[test] + fn test_engine_error_test_hidding_env_vars_in_debug() { + // setup: + let command_err = CommandError::new_with_env_vars( + "my raw message".to_string(), + Some("my safe message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + let cluster_id = QoveryIdentifier::new_random(); + let engine_err = EngineError::new_unknown( + EventDetails::new( + Some(Kind::Scw), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + QoveryIdentifier::new_random(), + Some(ScwRegion::Paris.as_str().to_string()), + Stage::Infrastructure(InfrastructureStep::Create), + Transmitter::Kubernetes(cluster_id.to_string(), cluster_id.to_string()), + ), + "qovery_log_message".to_string(), + "user_log_message".to_string(), + Some(command_err.clone()), + None, + None, + ); + + // execute: + let res = format!("{:?}", engine_err); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } } diff --git a/src/events/mod.rs b/src/events/mod.rs index 7fe246f0..3cb36db7 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -4,11 +4,13 @@ pub mod io; +extern crate derivative; extern crate url; use crate::cloud_provider::Kind; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::io_models::QoveryIdentifier; +use derivative::Derivative; use 
std::fmt::{Display, Formatter}; #[derive(Debug, Clone)] @@ -63,7 +65,8 @@ impl From for ErrorMessageVerbosity { } } -#[derive(Debug, Clone)] +#[derive(Derivative, Clone)] +#[derivative(Debug)] /// EventMessage: represents an event message. pub struct EventMessage { // Message which is known to be safe: doesn't expose any credentials nor touchy info. @@ -71,6 +74,8 @@ pub struct EventMessage { // String containing full details including touchy data (passwords and tokens). full_details: Option, // Environments variables including touchy data such as secret keys. + // env_vars field is ignored from any wild Debug printing because of it touchy data it carries. + #[derivative(Debug = "ignore")] env_vars: Option>, } @@ -612,4 +617,21 @@ mod tests { assert!(!res.contains("my_secret")); assert!(!res.contains("my_secret_value")); } + + #[test] + fn test_event_message_test_hidding_env_vars_in_debug() { + // setup: + let event_message = EventMessage::new_with_env_vars( + "my safe message".to_string(), + Some("my full message".to_string()), + Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), + ); + + // execute: + let res = format!("{:?}", event_message); + + // verify: + assert!(!res.contains("my_secret")); + assert!(!res.contains("my_secret_value")); + } } From 520c854fdeba843cc708f3dcc2125d7f1d4fb713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 1 Apr 2022 16:54:10 +0200 Subject: [PATCH 016/122] Add advance settings for application (#679) --- src/cloud_provider/service.rs | 3 - src/io_models.rs | 22 +++++-- src/models/application.rs | 84 ++++++++++++++++++++----- src/models/aws/application.rs | 30 +-------- src/models/database.rs | 5 -- src/models/digital_ocean/application.rs | 28 +-------- src/models/router.rs | 6 +- src/models/scaleway/application.rs | 49 ++++----------- test_utilities/src/common.rs | 20 +++--- 9 files changed, 113 insertions(+), 134 deletions(-) diff --git a/src/cloud_provider/service.rs 
b/src/cloud_provider/service.rs index 29c45d75..aed0b930 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -14,7 +14,6 @@ use crate::cloud_provider::utilities::check_domain_for; use crate::cloud_provider::DeploymentTarget; use crate::cmd; use crate::cmd::helm; -use crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::Statefulset; use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replicas_by_selector, ScalingKind}; use crate::cmd::structs::LabelsContent; @@ -66,7 +65,6 @@ pub trait Service: ToTransmitter { fn version(&self) -> String; fn action(&self) -> &Action; fn private_port(&self) -> Option; - fn start_timeout(&self) -> Timeout; fn total_cpus(&self) -> String; fn cpu_burst(&self) -> String; fn total_ram_in_mib(&self) -> u32; @@ -326,7 +324,6 @@ pub fn default_tera_context( environment: &Environment, ) -> TeraContext { let mut context = TeraContext::new(); - context.insert("id", service.id()); context.insert("owner_id", environment.owner_id.as_str()); context.insert("project_id", environment.project_id.as_str()); diff --git a/src/io_models.rs b/src/io_models.rs index 7aedf487..5c57e8cd 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -186,6 +186,19 @@ pub struct Port { pub protocol: Protocol, } +#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +pub struct ApplicationAdvanceSettings { + pub deployment_delay_start_time_sec: u32, +} + +impl Default for ApplicationAdvanceSettings { + fn default() -> Self { + ApplicationAdvanceSettings { + deployment_delay_start_time_sec: 30, + } + } +} + #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct Application { pub id: String, @@ -205,11 +218,12 @@ pub struct Application { pub total_ram_in_mib: u32, pub min_instances: u32, pub max_instances: u32, - pub start_timeout_in_seconds: u32, pub storage: Vec, /// Key is a String, Value is a base64 encoded String /// Use BTreeMap to get Hash trait which is not available on 
HashMap pub environment_vars: BTreeMap, + #[serde(default)] + pub advance_settings: ApplicationAdvanceSettings, } impl Application { @@ -235,10 +249,10 @@ impl Application { self.total_ram_in_mib, self.min_instances, self.max_instances, - self.start_timeout_in_seconds, build, self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), environment_variables, + self.advance_settings.clone(), AwsAppExtraSettings {}, listeners, logger.clone(), @@ -254,10 +268,10 @@ impl Application { self.total_ram_in_mib, self.min_instances, self.max_instances, - self.start_timeout_in_seconds, build, self.storage.iter().map(|s| s.to_do_storage()).collect::>(), environment_variables, + self.advance_settings.clone(), DoAppExtraSettings {}, listeners, logger.clone(), @@ -273,10 +287,10 @@ impl Application { self.total_ram_in_mib, self.min_instances, self.max_instances, - self.start_timeout_in_seconds, build, self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), environment_variables, + self.advance_settings.clone(), ScwAppExtraSettings {}, listeners, logger.clone(), diff --git a/src/models/application.rs b/src/models/application.rs index b40a061e..c8037108 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -1,5 +1,7 @@ use crate::build_platform::Build; -use crate::cloud_provider::models::{EnvironmentVariable, Storage}; +use crate::cloud_provider::environment::Environment; +use crate::cloud_provider::kubernetes::Kubernetes; +use crate::cloud_provider::models::{EnvironmentVariable, EnvironmentVariableDataTemplate, Storage}; use crate::cloud_provider::service::{delete_stateless_service, scale_down_application}; use crate::cloud_provider::service::{ deploy_stateless_service_error, deploy_user_stateless_service, send_progress_on_long_task, Action, Create, Delete, @@ -7,11 +9,10 @@ use crate::cloud_provider::service::{ }; use crate::cloud_provider::utilities::{print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; -use 
crate::cmd::helm::Timeout; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::{Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; +use crate::io_models::{ApplicationAdvanceSettings, Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; use crate::models::types::{CloudProvider, ToTeraContext}; use function_name::named; @@ -36,12 +37,12 @@ pub struct Application { pub(super) total_ram_in_mib: u32, pub(super) min_instances: u32, pub(super) max_instances: u32, - pub(super) start_timeout_in_seconds: u32, pub(super) build: Build, pub(super) storage: Vec>, pub(super) environment_variables: Vec, pub(super) listeners: Listeners, pub(super) logger: Box, + pub(super) advance_settings: ApplicationAdvanceSettings, pub(super) _extra_settings: T::AppExtraSettings, } @@ -58,10 +59,10 @@ impl Application { total_ram_in_mib: u32, min_instances: u32, max_instances: u32, - start_timeout_in_seconds: u32, build: Build, storage: Vec>, environment_variables: Vec, + advance_settings: ApplicationAdvanceSettings, extra_settings: T::AppExtraSettings, listeners: Listeners, logger: Box, @@ -80,16 +81,75 @@ impl Application { total_ram_in_mib, min_instances, max_instances, - start_timeout_in_seconds, build, storage, environment_variables, listeners, logger, + advance_settings, _extra_settings: extra_settings, }) } + pub(super) fn default_tera_context(&self, kubernetes: &dyn Kubernetes, environment: &Environment) -> TeraContext { + let mut context = TeraContext::new(); + context.insert("id", self.id()); + context.insert("owner_id", environment.owner_id.as_str()); + context.insert("project_id", environment.project_id.as_str()); + context.insert("organization_id", environment.organization_id.as_str()); + context.insert("environment_id", environment.id.as_str()); + context.insert("region", 
kubernetes.region().as_str()); + context.insert("zone", kubernetes.zone()); + context.insert("name", self.name()); + context.insert("sanitized_name", &self.sanitized_name()); + context.insert("namespace", environment.namespace()); + context.insert("cluster_name", kubernetes.name()); + context.insert("total_cpus", &self.total_cpus()); + context.insert("total_ram_in_mib", &self.total_ram_in_mib()); + context.insert("min_instances", &self.min_instances()); + context.insert("max_instances", &self.max_instances()); + + if let Some(private_port) = self.public_port() { + context.insert("is_private_port", &true); + context.insert("private_port", &private_port); + } else { + context.insert("is_private_port", &false); + } + + context.insert("version", &self.commit_id()); + + let commit_id = self.build.image.commit_id.as_str(); + context.insert("helm_app_version", &commit_id[..7]); + context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + context.insert( + "start_timeout_in_seconds", + &self.advance_settings.deployment_delay_start_time_sec, + ); + + let environment_variables = self + .environment_variables + .iter() + .map(|ev| EnvironmentVariableDataTemplate { + key: ev.key.clone(), + value: ev.value.clone(), + }) + .collect::>(); + + context.insert("environment_variables", &environment_variables); + context.insert("ports", &self.ports); + context.insert("is_registry_secret", &true); + context.insert("registry_secret", self.build().image.registry_host()); + + // TODO: Remove this + context.insert("clone", &false); + + if self.context.resource_expiration_in_seconds().is_some() { + context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) + } + + context + } + pub fn is_stateful(&self) -> bool { !self.storage.is_empty() } @@ -125,10 +185,6 @@ impl Application { .map(|port| port.port as u16) } - pub fn start_timeout(&self) -> u32 { - (self.start_timeout_in_seconds + 10) * 4 - } - pub fn total_cpus(&self) -> 
String { self.total_cpus.to_string() } @@ -169,7 +225,7 @@ impl Application { &mut self.build } - pub fn sanitize_name(&self) -> String { + pub fn sanitized_name(&self) -> String { sanitize_name("app", self.id()) } @@ -225,7 +281,7 @@ where } fn sanitized_name(&self) -> String { - self.sanitize_name() + self.sanitized_name() } fn version(&self) -> String { @@ -240,10 +296,6 @@ where self.public_port() } - fn start_timeout(&self) -> Timeout { - Timeout::Value(self.start_timeout()) - } - fn total_cpus(&self) -> String { self.total_cpus() } diff --git a/src/models/aws/application.rs b/src/models/aws/application.rs index 85986e74..2d159c01 100644 --- a/src/models/aws/application.rs +++ b/src/models/aws/application.rs @@ -1,6 +1,5 @@ use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; -use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::models::StorageDataTemplate; use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; @@ -13,25 +12,7 @@ use tera::Context as TeraContext; impl ToTeraContext for Application { fn to_tera_context(&self, target: &DeploymentTarget) -> Result { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); - let mut context = default_tera_context(self, target.kubernetes, target.environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build().image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - 
context.insert("is_registry_secret", &true); - context.insert("registry_secret", self.build().image.registry_host()); + let mut context = self.default_tera_context(target.kubernetes, target.environment); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -52,7 +33,6 @@ impl ToTeraContext for Application { )); } }; - context.insert("cpu_burst", &cpu_limits.cpu_limit); let storage = self @@ -78,12 +58,6 @@ impl ToTeraContext for Application { context.insert("storage", &storage); context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } Ok(context) } diff --git a/src/models/database.rs b/src/models/database.rs index 7d8db645..4555e335 100644 --- a/src/models/database.rs +++ b/src/models/database.rs @@ -5,7 +5,6 @@ use crate::cloud_provider::service::{ }; use crate::cloud_provider::utilities::{check_domain_for, managed_db_name_sanitizer, print_action}; use crate::cloud_provider::{service, DeploymentTarget}; -use crate::cmd::helm::Timeout; use crate::cmd::kubectl; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; @@ -205,10 +204,6 @@ where Some(self.private_port) } - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - fn total_cpus(&self) -> String { self.total_cpus.to_string() } diff --git a/src/models/digital_ocean/application.rs b/src/models/digital_ocean/application.rs index 664c8bde..3e637e41 100644 --- a/src/models/digital_ocean/application.rs +++ b/src/models/digital_ocean/application.rs @@ -1,6 +1,5 @@ use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, 
StorageDataTemplate}; -use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::models::StorageDataTemplate; use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; @@ -15,11 +14,7 @@ impl ToTeraContext for Application { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); - - context.insert("helm_app_version", &commit_id[..7]); - context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); + let mut context = self.default_tera_context(kubernetes, environment); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -42,19 +37,6 @@ impl ToTeraContext for Application { }; context.insert("cpu_burst", &cpu_limits.cpu_limit); - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); - // This is specific to digital ocean as it is them that create the registry secret // we don't have the hand on it context.insert("registry_secret", "do-container-registry-secret-for-cluster"); @@ -79,12 +61,6 @@ impl ToTeraContext for Application { context.insert("storage", &storage); context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", 
&self.context.resource_expiration_in_seconds()) - } Ok(context) } diff --git a/src/models/router.rs b/src/models/router.rs index 1fc3a9b7..73a9a44d 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -7,7 +7,7 @@ use crate::cloud_provider::service::{ use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name}; use crate::cloud_provider::DeploymentTarget; use crate::cmd::helm; -use crate::cmd::helm::{to_engine_error, Timeout}; +use crate::cmd::helm::to_engine_error; use crate::errors::EngineError; use crate::events::{EngineEvent, EnvironmentStep, EventMessage, Stage, ToTransmitter, Transmitter}; use crate::io_models::{Context, Listen, Listener, Listeners}; @@ -263,10 +263,6 @@ where None } - fn start_timeout(&self) -> Timeout { - Timeout::Default - } - fn total_cpus(&self) -> String { "1".to_string() } diff --git a/src/models/scaleway/application.rs b/src/models/scaleway/application.rs index e43883dd..07819e4d 100644 --- a/src/models/scaleway/application.rs +++ b/src/models/scaleway/application.rs @@ -1,6 +1,5 @@ use crate::cloud_provider::kubernetes::validate_k8s_required_cpu_and_burstable; -use crate::cloud_provider::models::{EnvironmentVariableDataTemplate, StorageDataTemplate}; -use crate::cloud_provider::service::default_tera_context; +use crate::cloud_provider::models::StorageDataTemplate; use crate::cloud_provider::DeploymentTarget; use crate::errors::EngineError; use crate::events::{EnvironmentStep, Stage}; @@ -15,25 +14,19 @@ impl ToTeraContext for Application { let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::LoadConfiguration)); let kubernetes = target.kubernetes; let environment = target.environment; - let mut context = default_tera_context(self, kubernetes, environment); - let commit_id = self.build.image.commit_id.as_str(); + let mut context = self.default_tera_context(kubernetes, environment); - context.insert("helm_app_version", &commit_id[..7]); - 
context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); - - let environment_variables = self - .environment_variables - .iter() - .map(|ev| EnvironmentVariableDataTemplate { - key: ev.key.clone(), - value: ev.value.clone(), - }) - .collect::>(); - - context.insert("environment_variables", &environment_variables); - context.insert("ports", &self.ports); - context.insert("is_registry_secret", &true); + // container registry credentials context.insert("registry_secret_name", &format!("registry-token-{}", &self.id)); + context.insert( + "container_registry_docker_json_config", + self.build + .image + .clone() + .registry_docker_json_config + .unwrap_or_default() + .as_str(), + ); let cpu_limits = match validate_k8s_required_cpu_and_burstable( &ListenersHelper::new(&self.listeners), @@ -77,26 +70,8 @@ impl ToTeraContext for Application { .collect::>(); let is_storage = !storage.is_empty(); - context.insert("storage", &storage); context.insert("is_storage", &is_storage); - context.insert("clone", &false); - context.insert("start_timeout_in_seconds", &self.start_timeout_in_seconds); - - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - - // container registry credentials - context.insert( - "container_registry_docker_json_config", - self.build - .image - .clone() - .registry_docker_json_config - .unwrap_or_default() - .as_str(), - ); Ok(context) } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index af35a0f8..c000f998 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -298,7 +298,7 @@ pub fn environment_3_apps_3_routers_3_databases( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }, Application { id: generate_id(), @@ -344,7 +344,7 @@ pub fn environment_3_apps_3_routers_3_databases( 
min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }, Application { id: generate_id(), @@ -392,7 +392,7 @@ pub fn environment_3_apps_3_routers_3_databases( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }, ], routers: vec![ @@ -553,7 +553,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }], routers: vec![Router { id: router_id, @@ -608,7 +608,7 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { min_instances: 1, max_instances: 1, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 120, + advance_settings: Default::default(), }], routers: vec![], databases: vec![], @@ -721,7 +721,7 @@ pub fn environnement_2_app_2_routers_1_psql( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }, Application { id: generate_id(), @@ -767,7 +767,7 @@ pub fn environnement_2_app_2_routers_1_psql( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }, ], routers: vec![ @@ -864,7 +864,7 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }], routers: vec![Router { id: generate_id(), @@ -925,7 +925,7 @@ pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }], routers: vec![], databases: vec![], @@ 
-974,7 +974,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - start_timeout_in_seconds: 60, + advance_settings: Default::default(), }], routers: vec![Router { id: generate_id(), From a2a71cd7d00141c6a4750a66da85b0521160b800 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 7 Apr 2022 05:52:16 +0200 Subject: [PATCH 017/122] feat: add ephemeral storage to engine In order to avoid using network storage, bringing random performances on container build, we're going to try local storage instead and see how it behaves --- .../charts/qovery-engine/templates/statefulset-deploy.yaml | 6 ++++++ lib/common/bootstrap/charts/qovery-engine/values.yaml | 6 ++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml index 6822562b..e2800e81 100644 --- a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml +++ b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml @@ -112,6 +112,7 @@ spec: mountPath: {{ .Values.volumes.qoveryWorkspace.path }} - name: docker-graph-storage mountPath: {{ .Values.volumes.dockerGraphStorage.path }} + {{ if .Values.volumes.useNetworkDisks }} volumeClaimTemplates: - metadata: name: docker-graph-storage @@ -135,4 +136,9 @@ spec: resources: requests: storage: {{ .Values.volumes.qoveryWorkspace.size }} + {{ else }} + volumes: + - name: docker-graph-storage + emptyDir: {} + {{ end }} {{ end }} \ No newline at end of file diff --git a/lib/common/bootstrap/charts/qovery-engine/values.yaml b/lib/common/bootstrap/charts/qovery-engine/values.yaml index ceb01a52..0c7f3657 100644 --- a/lib/common/bootstrap/charts/qovery-engine/values.yaml +++ b/lib/common/bootstrap/charts/qovery-engine/values.yaml @@ -32,6 +32,7 @@ environmentVariables: #REGION: "" 
volumes: + useNetworkDisks: true storageClassName: "" qoveryWorkspace: size: 20Gi @@ -43,7 +44,7 @@ volumes: buildContainer: enable: true image: docker - tag: 20.10.11-dind + tag: 20.10.14-dind terminationGracePeriodSeconds: 7200 @@ -78,10 +79,11 @@ buildResources: {} # limits: # cpu: 100m # memory: 128Mi + # ephemeral-storage: 40Gi # requests: # cpu: 100m # memory: 128Mi - + # ephemeral-storage: 40Gi nodeSelector: {} From 376cb0c92c423ced826e7184e7f6b97947f564bf Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 7 Apr 2022 11:40:35 +0200 Subject: [PATCH 018/122] fix: missing volume in new engine local storage --- .../charts/qovery-engine/templates/statefulset-deploy.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml index e2800e81..31b4070f 100644 --- a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml +++ b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml @@ -1,6 +1,6 @@ {{- $kubefullname := include "qovery-engine.fullname" . 
}} apiVersion: apps/v1 -{{ if .Values.buildContainer.enable }} +{{ if and .Values.buildContainer.enable .Values.volumes.useNetworkDisks }} kind: StatefulSet {{ else }} kind: Deployment @@ -140,5 +140,7 @@ spec: volumes: - name: docker-graph-storage emptyDir: {} + - name: qovery-workspace + emptyDir: {} {{ end }} {{ end }} \ No newline at end of file From 6d7f3bd4c452d908c24172c4ec71b2def5606f39 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 7 Apr 2022 12:11:42 +0200 Subject: [PATCH 019/122] fix: remove engine deployment serviceAccountName --- .../charts/qovery-engine/templates/statefulset-deploy.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml index 31b4070f..506d37e5 100644 --- a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml +++ b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml @@ -30,7 +30,9 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} + {{ if and .Values.buildContainer.enable .Values.volumes.useNetworkDisks }} serviceAccountName: {{ include "qovery-engine.serviceAccountName" . 
}} + {{ end }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} {{- with .Values.nodeSelector }} From b47c2d2a817c3207e779b252353744d1c5b7ec3f Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 7 Apr 2022 13:48:40 +0200 Subject: [PATCH 020/122] feat: manage engine local storage disk saturation --- src/build_platform/local_docker.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 9728a235..8ff9cd21 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -72,14 +72,16 @@ impl LocalDocker { // ensure there is enough disk space left before building a new image let docker_path_string = "/var/lib/docker"; + let root_path_string = "/"; let docker_path = Path::new(docker_path_string); + let root_path = Path::new(root_path_string); // get system info let mut system = sysinfo::System::new_all(); system.refresh_all(); for disk in system.get_disks() { - if disk.get_mount_point() == docker_path { + if disk.get_mount_point() == docker_path || disk.get_mount_point() == root_path { let event_details = self.get_event_details(); if let Err(e) = check_docker_space_usage_and_clean( &self.context.docker, @@ -513,7 +515,8 @@ fn check_docker_space_usage_and_clean( event_details: EventDetails, logger: &dyn Logger, ) -> Result<(), DockerError> { - let docker_max_disk_percentage_usage_before_purge = 60; // arbitrary percentage that should make the job anytime + // since we use local storage, this % should not be too high to avoid reaching limits on ephemeral storage + let docker_max_disk_percentage_usage_before_purge = 20; // arbitrary percentage that should make the job anytime let available_space = docker_path_size_info.get_available_space(); let docker_percentage_remaining = available_space * 100 / docker_path_size_info.get_total_space(); From b02e2529a0ebc3e9f0094a510059b5691e9d4724 Mon Sep 17 00:00:00 2001 From: Pierre Mavro 
Date: Thu, 7 Apr 2022 14:09:46 +0200 Subject: [PATCH 021/122] fix: another fix for engine deployment --- .../charts/qovery-engine/templates/statefulset-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml index 506d37e5..d43edb03 100644 --- a/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml +++ b/lib/common/bootstrap/charts/qovery-engine/templates/statefulset-deploy.yaml @@ -11,7 +11,7 @@ metadata: {{- include "qovery-engine.labels" . | nindent 4 }} spec: replicas: {{ .Values.autoscaler.min_replicas }} - {{ if .Values.buildContainer.enable }} + {{ if and .Values.buildContainer.enable .Values.volumes.useNetworkDisks }} serviceName: qovery-engine {{ end }} selector: From aa9302a58899d6129abbda60ddc2ffa59b1f409e Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Fri, 8 Apr 2022 10:01:55 +0200 Subject: [PATCH 022/122] feat: adding extra docker build summary log (#682) Ticket: ENG-1163 --- src/build_platform/local_docker.rs | 58 ++++++++--- src/build_platform/mod.rs | 4 +- src/cmd/docker.rs | 152 +++++++++++++++++++++++++++-- 3 files changed, 191 insertions(+), 23 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 8ff9cd21..c433e558 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -13,7 +13,7 @@ use crate::build_platform::{Build, BuildError, BuildPlatform, Credentials, Kind} use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; -use crate::cmd::docker::{ContainerImage, Docker, DockerError}; +use crate::cmd::docker::{BuildResult, ContainerImage, Docker, DockerError}; use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use 
crate::git; @@ -106,7 +106,7 @@ impl LocalDocker { into_dir_docker_style: &str, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result<(), BuildError> { + ) -> Result { // logger let log_info = { let app_id = build.image.application_id.clone(); @@ -147,18 +147,22 @@ impl LocalDocker { build.environment_variables.retain(|k, _| dockerfile_args.contains(k)); build.compute_image_tag(); + let mut build_result = BuildResult::new(); + // Prepare image we want to build let image_to_build = ContainerImage { registry: build.image.registry_url.clone(), name: build.image.name(), tags: vec![build.image.tag.clone(), "latest".to_string()], }; + build_result.build_candidate_image(Some(image_to_build.clone())); let image_cache = ContainerImage { registry: build.image.registry_url.clone(), name: build.image.name(), tags: vec!["latest".to_string()], }; + build_result.source_cached_image(Some(image_cache.clone())); // Check if the image does not exist already remotely, if yes, we skip the build let image_name = image_to_build.image_name(); @@ -167,10 +171,12 @@ impl LocalDocker { log_info(format!("🎯 Skipping build. Image already exist in the registry {}", image_name)); // skip build - return Ok(()); + build_result.image_exists_remotely(true); + return Ok(build_result); } - log_info(format!("⛏️ Building image. It does not exist remotely {}", image_name)); + log_info(format!("⛏️Building image. 
It does not exist remotely {}", image_name)); + // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build .environment_variables @@ -191,7 +197,7 @@ impl LocalDocker { ); match exit_status { - Ok(_) => Ok(()), + Ok(build_result) => Ok(build_result), Err(DockerError::Aborted(msg)) => Err(BuildError::Aborted(msg)), Err(err) => Err(BuildError::DockerError(build.image.application_id.clone(), err)), } @@ -204,9 +210,23 @@ impl LocalDocker { use_build_cache: bool, lh: &ListenersHelper, is_task_canceled: &dyn Fn() -> bool, - ) -> Result<(), BuildError> { + ) -> Result { + const LATEST_TAG: &str = "latest"; let name_with_tag = build.image.full_image_name_with_tag(); - let name_with_latest_tag = format!("{}:latest", build.image.full_image_name()); + let container_image = ContainerImage::new( + build.image.registry_url.clone(), + build.image.name.to_string(), + vec![build.image.tag.to_string()], + ); + let container_image_cache = ContainerImage::new( + build.image.registry_url.clone(), + build.image.name.to_string(), + vec![LATEST_TAG.to_string()], + ); + let name_with_latest_tag = format!("{}:{}", build.image.full_image_name(), LATEST_TAG); + let mut build_result = BuildResult::new(); + build_result.build_candidate_image(Some(container_image)); + build_result.source_cached_image(Some(container_image_cache)); let mut exit_status: Result<(), command::CommandError> = Err(command::CommandError::ExecutionError( Error::new(ErrorKind::InvalidData, "No builder names".to_string()), @@ -312,7 +332,10 @@ impl LocalDocker { } match exit_status { - Ok(_) => Ok(()), + Ok(_) => { + build_result.built(true); + Ok(build_result) + } Err(Killed(msg)) => Err(BuildError::Aborted(msg)), Err(err) => Err(BuildError::BuildpackError(build.image.application_id.clone(), err)), } @@ -351,7 +374,7 @@ impl BuildPlatform for LocalDocker { self.name.as_str() } - fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError> { + fn build(&self, build: 
&mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result { let event_details = self.get_event_details(); let listeners_helper = ListenersHelper::new(&self.listeners); let app_id = build.image.application_id.clone(); @@ -375,8 +398,7 @@ impl BuildPlatform for LocalDocker { self.context.execution_id(), )); self.logger - .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(msg))); - // LOGGING + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(msg))); // Create callback that will be called by git to provide credentials per user // If people use submodule, they need to provide us their ssh key @@ -485,6 +507,20 @@ impl BuildPlatform for LocalDocker { ) }; + // log image building infos + if let Ok(build_result) = &result { + listeners_helper.deployment_in_progress(ProgressInfo::new( + ProgressScope::Application { id: app_id }, + ProgressLevel::Info, + Some(build_result.to_string()), + self.context.execution_id(), + )); + self.logger.log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe(build_result.to_string()), + )); + } + result } diff --git a/src/build_platform/mod.rs b/src/build_platform/mod.rs index 55a57eb0..59633a51 100644 --- a/src/build_platform/mod.rs +++ b/src/build_platform/mod.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use crate::cmd::command::CommandError; -use crate::cmd::docker::DockerError; +use crate::cmd::docker::{BuildResult, DockerError}; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter}; use crate::io_models::{Context, Listen, QoveryIdentifier}; @@ -52,7 +52,7 @@ pub trait BuildPlatform: ToTransmitter + Listen { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } - fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result<(), BuildError>; + fn build(&self, build: &mut Build, is_task_canceled: &dyn Fn() -> bool) -> Result; fn logger(&self) -> 
Box; fn get_event_details(&self) -> EventDetails { let context = self.context(); diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 1d3d37d8..5160bad4 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -1,5 +1,6 @@ use crate::cmd::command::{CommandError, CommandKiller, QoveryCommand}; use lazy_static::lazy_static; +use std::fmt::{Display, Formatter}; use std::path::Path; use std::process::ExitStatus; use std::sync::Mutex; @@ -30,7 +31,110 @@ lazy_static! { static ref LOGIN_LOCK: Mutex<()> = Mutex::new(()); } -#[derive(Debug)] +#[derive(Clone, Debug)] +pub struct BuildResult { + source_cached_image: Option, + build_candidate_image: Option, + cached_image_pulled: bool, + image_exists_remotely: bool, + built: bool, + pushed: bool, +} + +impl BuildResult { + pub fn new() -> Self { + Self { + source_cached_image: None, + build_candidate_image: None, + cached_image_pulled: false, + image_exists_remotely: false, + built: false, + pushed: false, + } + } + + pub fn source_cached_image(&mut self, source_cached_image: Option) -> &mut Self { + self.source_cached_image = source_cached_image; + self + } + + pub fn build_candidate_image(&mut self, build_candidate_image: Option) -> &mut Self { + self.build_candidate_image = build_candidate_image; + self + } + + pub fn cached_image_pulled(&mut self, cached_image_pulled: bool) -> &mut Self { + self.cached_image_pulled = cached_image_pulled; + self + } + + pub fn image_exists_remotely(&mut self, image_exists_remotely: bool) -> &mut Self { + self.image_exists_remotely = image_exists_remotely; + self + } + + pub fn built(&mut self, built: bool) -> &mut Self { + self.built = built; + self + } + + pub fn pushed(&mut self, pushed: bool) -> &mut Self { + self.pushed = pushed; + self + } +} + +impl Default for BuildResult { + fn default() -> Self { + BuildResult::new() + } +} + +impl Display for BuildResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("Build summary:")?; + + match 
&self.build_candidate_image { + Some(image) => f.write_str(format!("\n\t🐳️ image to be built: `{}`", image.image_name()).as_str())?, + None => return f.write_str("\n\t⁉️ no image to be built provided"), + }; + + // image remotely exists? + match &self.image_exists_remotely { + true => f.write_str("\n\t♻️ image exists remotely")?, + false => f.write_str("\n\t🕳 image doesn't exist remotely")?, + }; + + // cache + // TODO(benjaminch): check whether cached image exists locally before pulling in order to get more details here + match &self.source_cached_image { + Some(cache) => { + f.write_str(format!("\n\t🍀 cached image provided: `{}`", cache.image_name()).as_str())?; + match self.cached_image_pulled { + true => f.write_str("\n\t✔️ cached image pulled")?, + false => f.write_str("\n\t⁉️ cached image not pulled (most likely doesn't exists remotely)")?, + } + } + None => f.write_str("\n\t🕳 no cached image provided")?, + }; + + // image built + match self.built { + true => f.write_str("\n\t🎉 image built")?, + false => f.write_str("\n\t‼️ image not built")?, + }; + + // image pushed + match self.pushed { + true => f.write_str("\n\t🚀 image pushed")?, + false => f.write_str("\n\t‼️ image not pushed")?, + }; + + Ok(()) + } +} + +#[derive(Debug, Clone)] pub struct ContainerImage { pub registry: Url, pub name: String, @@ -38,6 +142,10 @@ pub struct ContainerImage { } impl ContainerImage { + pub fn new(registry: Url, name: String, tags: Vec) -> Self { + ContainerImage { registry, name, tags } + } + pub fn image_names(&self) -> Vec { let host = if let Some(port) = self.registry.port() { format!("{}:{}", self.registry.host_str().unwrap_or_default(), port) @@ -230,14 +338,17 @@ impl Docker { stdout_output: &mut Stdout, stderr_output: &mut Stderr, should_abort: &CommandKiller, - ) -> Result<(), DockerError> + ) -> Result where Stdout: FnMut(String), Stderr: FnMut(String), { + let mut build_result = BuildResult::new(); + // if there is no tags, nothing to build if 
image_to_build.tags.is_empty() { - return Ok(()); + build_result.built = false; + return Ok(build_result); } // Do some checks @@ -293,15 +404,22 @@ impl Docker { stdout_output: &mut Stdout, stderr_output: &mut Stderr, should_abort: &CommandKiller, - ) -> Result<(), DockerError> + ) -> Result where Stdout: FnMut(String), Stderr: FnMut(String), { info!("Docker build {:?}", image_to_build.image_name()); + let mut build_result = BuildResult::new(); + build_result.build_candidate_image(Some(image_to_build.clone())); + build_result.source_cached_image(Some(cache.clone())); + // Best effort to pull the cache, if it does not exist that's ok too - let _ = self.pull(cache, stdout_output, stderr_output, should_abort); + match self.pull(cache, stdout_output, stderr_output, should_abort) { + Ok(_) => build_result.cached_image_pulled(true), + Err(_) => build_result.cached_image_pulled(false), + }; let mut args_string: Vec = vec![ "build".to_string(), @@ -318,7 +436,7 @@ impl Docker { for img_cache_name in cache.image_names() { args_string.push("--tag".to_string()); - args_string.push(img_cache_name) + args_string.push(img_cache_name.to_string()); } for (k, v) in build_args { @@ -335,12 +453,14 @@ impl Docker { stderr_output, should_abort, )?; + build_result.built(true); if push_after_build { let _ = self.push(image_to_build, stdout_output, stderr_output, should_abort)?; + build_result.pushed(true); } - Ok(()) + Ok(build_result) } fn build_with_buildkit( @@ -354,19 +474,24 @@ impl Docker { stdout_output: &mut Stdout, stderr_output: &mut Stderr, should_abort: &CommandKiller, - ) -> Result<(), DockerError> + ) -> Result where Stdout: FnMut(String), Stderr: FnMut(String), { info!("Docker buildkit build {:?}", image_to_build.image_name()); + let mut build_result = BuildResult::new(); + build_result.build_candidate_image(Some(image_to_build.clone())); + build_result.source_cached_image(Some(cache.clone())); + let mut args_string: Vec = vec![ "buildx".to_string(), "build".to_string(), 
"--progress=plain".to_string(), "--network=host".to_string(), if push_after_build { + build_result.pushed(true); "--output=type=registry".to_string() // tell buildkit to push image to registry } else { "--output=type=docker".to_string() // tell buildkit to load the image into docker after build @@ -393,13 +518,20 @@ impl Docker { args_string.push(context.to_str().unwrap_or_default().to_string()); - docker_exec( + match docker_exec( &args_string.iter().map(|x| x.as_str()).collect::>(), &self.get_all_envs(&[]), stdout_output, stderr_output, should_abort, - ) + ) { + Ok(_) => { + build_result.cached_image_pulled(true); // --cache-from + build_result.built(true); + Ok(build_result) + } + Err(e) => Err(e), + } } pub fn push( From 58838e9218eb3957c8a74e2ba6db480b2efc6544 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Fri, 8 Apr 2022 11:15:34 +0200 Subject: [PATCH 023/122] feat: better format building summary logs --- src/cmd/docker.rs | 80 ++++++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 5160bad4..0e8c7fdc 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -92,45 +92,53 @@ impl Default for BuildResult { impl Display for BuildResult { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str("Build summary:")?; + if self.build_candidate_image.is_none() { + return f.write_str( + r#" +Build summary: + ⁉️ no image to be built provided +"#, + ); + } - match &self.build_candidate_image { - Some(image) => f.write_str(format!("\n\t🐳️ image to be built: `{}`", image.image_name()).as_str())?, - None => return f.write_str("\n\t⁉️ no image to be built provided"), - }; - - // image remotely exists? 
- match &self.image_exists_remotely { - true => f.write_str("\n\t♻️ image exists remotely")?, - false => f.write_str("\n\t🕳 image doesn't exist remotely")?, - }; - - // cache - // TODO(benjaminch): check whether cached image exists locally before pulling in order to get more details here - match &self.source_cached_image { - Some(cache) => { - f.write_str(format!("\n\t🍀 cached image provided: `{}`", cache.image_name()).as_str())?; - match self.cached_image_pulled { - true => f.write_str("\n\t✔️ cached image pulled")?, - false => f.write_str("\n\t⁉️ cached image not pulled (most likely doesn't exists remotely)")?, - } + let image_to_be_built = self + .build_candidate_image + .as_ref() + .expect("cannot get image to be built"); + let output = format!( + r#" +Build summary: + 🐳️ image to be built: `{}` + {} + {} + {} + {} + {}"#, + image_to_be_built.image_name(), + match &self.image_exists_remotely { + true => "♻️ image exists remotely", + false => "🕳 image doesn't exist remotely", + }, + // TODO(benjaminch): check whether cached image exists locally before pulling in order to get more details here + match &self.source_cached_image { + Some(cache) => format!("🍀 cached image provided: `{}`", cache.image_name()), + None => "🕳 no cached image provided".to_string(), + }, + match self.cached_image_pulled { + true => "✔️ cached image pulled", + false => "⁉️ cached image not pulled (most likely doesn't exists remotely)", + }, + match self.built { + true => "🎉 image built", + false => "‼️ image not built", + }, + match self.pushed { + true => "🚀 image pushed", + false => "‼️ image not pushed", } - None => f.write_str("\n\t🕳 no cached image provided")?, - }; + ); - // image built - match self.built { - true => f.write_str("\n\t🎉 image built")?, - false => f.write_str("\n\t‼️ image not built")?, - }; - - // image pushed - match self.pushed { - true => f.write_str("\n\t🚀 image pushed")?, - false => f.write_str("\n\t‼️ image not pushed")?, - }; - - Ok(()) + 
f.write_str(output.as_str()) } } From 90af2130e6a19894c956673e3f3c68c85b7c83c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Mon, 11 Apr 2022 15:17:39 +0200 Subject: [PATCH 024/122] Cleanup docker image purge (#684) --- src/build_platform/local_docker.rs | 109 ++++++++++++----------------- src/cmd/docker.rs | 2 +- 2 files changed, 46 insertions(+), 65 deletions(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index c433e558..454057ba 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -6,15 +6,15 @@ use std::time::Duration; use std::{env, fs}; use git2::{Cred, CredentialType}; -use sysinfo::{Disk, DiskExt, SystemExt}; +use sysinfo::{DiskExt, RefreshKind, SystemExt}; use crate::build_platform::dockerfile_utils::extract_dockerfile_args; use crate::build_platform::{Build, BuildError, BuildPlatform, Credentials, Kind}; use crate::cmd::command; use crate::cmd::command::CommandError::Killed; use crate::cmd::command::{CommandKiller, QoveryCommand}; -use crate::cmd::docker::{BuildResult, ContainerImage, Docker, DockerError}; -use crate::events::{EngineEvent, EventDetails, EventMessage, ToTransmitter, Transmitter}; +use crate::cmd::docker::{BuildResult, ContainerImage, DockerError}; +use crate::events::{EngineEvent, EventMessage, ToTransmitter, Transmitter}; use crate::fs::workspace_directory; use crate::git; use crate::io_models::{ @@ -61,6 +61,8 @@ impl LocalDocker { } fn reclaim_space_if_needed(&self) { + // ensure there is enough disk space left before building a new image + // For CI, we should skip this job if env::var_os("CI").is_some() { self.logger.log(EngineEvent::Info( self.get_event_details(), @@ -70,32 +72,47 @@ impl LocalDocker { return; } - // ensure there is enough disk space left before building a new image - let docker_path_string = "/var/lib/docker"; - let root_path_string = "/"; - let docker_path = Path::new(docker_path_string); - let 
root_path = Path::new(root_path_string); + // arbitrary percentage that should make the job anytime + const DISK_FREE_SPACE_PERCENTAGE_BEFORE_PURGE: u64 = 20; + let mount_points_to_check = vec![Path::new("/var/lib/docker"), Path::new("/")]; + let mut disk_free_space_percent: u64 = 100; - // get system info - let mut system = sysinfo::System::new_all(); - system.refresh_all(); + let sys_info = sysinfo::System::new_with_specifics(RefreshKind::new().with_disks().with_disks_list()); + let should_reclaim_space = sys_info.get_disks().iter().any(|disk| { + // Check disk own the mount point we are interested in + if !mount_points_to_check.contains(&disk.get_mount_point()) { + return false; + } - for disk in system.get_disks() { - if disk.get_mount_point() == docker_path || disk.get_mount_point() == root_path { - let event_details = self.get_event_details(); - if let Err(e) = check_docker_space_usage_and_clean( - &self.context.docker, - disk, - event_details.clone(), - &*self.logger(), - ) { - self.logger.log(EngineEvent::Warning( - event_details, - EventMessage::new(e.to_string(), Some(e.to_string())), - )); - } - break; - }; + // Check if we have hit our threshold regarding remaining disk space + disk_free_space_percent = disk.get_available_space() * 100 / disk.get_total_space(); + if disk_free_space_percent <= DISK_FREE_SPACE_PERCENTAGE_BEFORE_PURGE { + return true; + } + + false + }); + + if !should_reclaim_space { + debug!( + "Docker skipping image purge, still {} % disk free space", + disk_free_space_percent + ); + return; + } + + let msg = format!( + "Purging docker images to reclaim disk space. 
Only {} % disk free space, This may take some time", + disk_free_space_percent + ); + self.logger + .log(EngineEvent::Info(self.get_event_details(), EventMessage::new_from_safe(msg))); + + // Request a purge if a disk is being low on space + if let Err(err) = self.context.docker.prune_images() { + let msg = format!("Error while purging docker images: {}", err); + self.logger + .log(EngineEvent::Warning(self.get_event_details(), EventMessage::new_from_safe(msg))); } } @@ -175,7 +192,7 @@ impl LocalDocker { return Ok(build_result); } - log_info(format!("⛏️Building image. It does not exist remotely {}", image_name)); + log_info(format!("⛏️ Building image. It does not exist remotely {}", image_name)); // Actually do the build of the image let env_vars: Vec<(&str, &str)> = build @@ -544,39 +561,3 @@ impl ToTransmitter for LocalDocker { Transmitter::BuildPlatform(self.id().to_string(), self.name().to_string()) } } - -fn check_docker_space_usage_and_clean( - docker: &Docker, - docker_path_size_info: &Disk, - event_details: EventDetails, - logger: &dyn Logger, -) -> Result<(), DockerError> { - // since we use local storage, this % should not be too high to avoid reaching limits on ephemeral storage - let docker_max_disk_percentage_usage_before_purge = 20; // arbitrary percentage that should make the job anytime - let available_space = docker_path_size_info.get_available_space(); - let docker_percentage_remaining = available_space * 100 / docker_path_size_info.get_total_space(); - - if docker_percentage_remaining < docker_max_disk_percentage_usage_before_purge || available_space == 0 { - logger.log(EngineEvent::Warning( - event_details, - EventMessage::new_from_safe(format!( - "Docker disk remaining ({}%) is lower than {}%, requesting cleaning (purge)", - docker_percentage_remaining, docker_max_disk_percentage_usage_before_purge - )), - )); - - return docker.prune_images(); - }; - - logger.log(EngineEvent::Info( - event_details, - EventMessage::new_from_safe(format!( - "No 
need to purge old docker images, only {}% ({}/{}) disk used", - 100 - docker_percentage_remaining, - docker_path_size_info.get_available_space(), - docker_path_size_info.get_total_space(), - )), - )); - - Ok(()) -} diff --git a/src/cmd/docker.rs b/src/cmd/docker.rs index 0e8c7fdc..0cf72ca3 100644 --- a/src/cmd/docker.rs +++ b/src/cmd/docker.rs @@ -565,11 +565,11 @@ impl Docker { info!("Docker prune images"); let all_prunes_commands = vec![ + vec!["buildx", "prune", "-a", "-f"], vec!["container", "prune", "-f"], vec!["image", "prune", "-a", "-f"], vec!["builder", "prune", "-a", "-f"], vec!["volume", "prune", "-f"], - vec!["buildx", "prune", "-a", "-f"], ]; let mut errored_commands = vec![]; From 6f123a49f7d0abfc2c87fde9593ecfe965f4824d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:22:32 +0200 Subject: [PATCH 025/122] Create charts.yaml --- .github/workflows/charts.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/charts.yaml diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml new file mode 100644 index 00000000..fd56e247 --- /dev/null +++ b/.github/workflows/charts.yaml @@ -0,0 +1,15 @@ +name: release-chart +on: + push: + branches: 'dev' + +jobs: + release-chart: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Publish Helm chart + uses: stefanprodan/helm-gh-pages@master + with: + token: ${{ secrets.GITHUB_TOKEN }} + charts_dir: lib/common/charts/qovery-shell-agent From fe78a7e2afbd35ad6bece5abbbe2e02621102523 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:24:33 +0200 Subject: [PATCH 026/122] Update charts.yaml --- .github/workflows/charts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml index fd56e247..de7d8b45 100644 --- a/.github/workflows/charts.yaml +++ 
b/.github/workflows/charts.yaml @@ -12,4 +12,4 @@ jobs: uses: stefanprodan/helm-gh-pages@master with: token: ${{ secrets.GITHUB_TOKEN }} - charts_dir: lib/common/charts/qovery-shell-agent + charts_dir: lib/common/bootstrap/charts/qovery-shell-agent From 6f966a7fc9f0bc0d5faef246bf141219716528ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:27:00 +0200 Subject: [PATCH 027/122] Update charts.yaml --- .github/workflows/charts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml index de7d8b45..c9f54f18 100644 --- a/.github/workflows/charts.yaml +++ b/.github/workflows/charts.yaml @@ -12,4 +12,4 @@ jobs: uses: stefanprodan/helm-gh-pages@master with: token: ${{ secrets.GITHUB_TOKEN }} - charts_dir: lib/common/bootstrap/charts/qovery-shell-agent + charts_dir: lib/common/bootstrap/charts/ From df988187ec08c368fe85f8f3e78cc53bd33fd66b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:58:41 +0200 Subject: [PATCH 028/122] Move qovery charts that we publish upstream (#686) --- lib/common/bootstrap/charts/qovery/README.txt | 4 ++++ .../bootstrap/charts/{ => qovery}/qovery-agent/.helmignore | 0 .../bootstrap/charts/{ => qovery}/qovery-agent/Chart.yaml | 0 .../charts/{ => qovery}/qovery-agent/templates/_helpers.tpl | 0 .../{ => qovery}/qovery-agent/templates/clusterrole.yaml | 0 .../qovery-agent/templates/clusterrolebinding.yaml | 0 .../{ => qovery}/qovery-agent/templates/deployment.yaml | 0 .../charts/{ => qovery}/qovery-agent/templates/hpa.yaml | 0 .../charts/{ => qovery}/qovery-agent/templates/secret.yaml | 0 .../{ => qovery}/qovery-agent/templates/serviceaccount.yaml | 0 .../bootstrap/charts/{ => qovery}/qovery-agent/values.yaml | 0 .../charts/{ => qovery}/qovery-shell-agent/.helmignore | 0 .../charts/{ => qovery}/qovery-shell-agent/Chart.yaml | 0 .../{ => 
qovery}/qovery-shell-agent/templates/_helpers.tpl | 0 .../qovery-shell-agent/templates/clusterrole.yaml | 0 .../qovery-shell-agent/templates/clusterrolebinding.yaml | 0 .../{ => qovery}/qovery-shell-agent/templates/deployment.yaml | 0 .../{ => qovery}/qovery-shell-agent/templates/secret.yaml | 0 .../qovery-shell-agent/templates/serviceaccount.yaml | 0 .../charts/{ => qovery}/qovery-shell-agent/values.yaml | 0 src/cloud_provider/aws/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/digitalocean/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/helm.rs | 2 +- src/cloud_provider/scaleway/kubernetes/helm_charts.rs | 2 +- 24 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 lib/common/bootstrap/charts/qovery/README.txt rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/.helmignore (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/Chart.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/_helpers.tpl (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/clusterrole.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/clusterrolebinding.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/deployment.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/hpa.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/secret.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/templates/serviceaccount.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-agent/values.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/.helmignore (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/Chart.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/templates/_helpers.tpl (100%) rename lib/common/bootstrap/charts/{ => 
qovery}/qovery-shell-agent/templates/clusterrole.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/templates/clusterrolebinding.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/templates/deployment.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/templates/secret.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/templates/serviceaccount.yaml (100%) rename lib/common/bootstrap/charts/{ => qovery}/qovery-shell-agent/values.yaml (100%) diff --git a/lib/common/bootstrap/charts/qovery/README.txt b/lib/common/bootstrap/charts/qovery/README.txt new file mode 100644 index 00000000..0204c686 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/README.txt @@ -0,0 +1,4 @@ +Using a dedicated repository because : + + - Those charts are packaged and upload to a helm repository (with github pages) + - Those charts should be free of tera context/non helm template engine as they are public and should be processable with helm only \ No newline at end of file diff --git a/lib/common/bootstrap/charts/qovery-agent/.helmignore b/lib/common/bootstrap/charts/qovery/qovery-agent/.helmignore similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/.helmignore rename to lib/common/bootstrap/charts/qovery/qovery-agent/.helmignore diff --git a/lib/common/bootstrap/charts/qovery-agent/Chart.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/Chart.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/Chart.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/Chart.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/_helpers.tpl b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/_helpers.tpl similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/_helpers.tpl rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/_helpers.tpl diff --git 
a/lib/common/bootstrap/charts/qovery-agent/templates/clusterrole.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/clusterrole.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/clusterrole.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/clusterrole.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/clusterrolebinding.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/clusterrolebinding.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/clusterrolebinding.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/clusterrolebinding.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/deployment.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/deployment.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/deployment.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/deployment.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/hpa.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/hpa.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/hpa.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/hpa.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/secret.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/secret.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/secret.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/templates/secret.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/templates/serviceaccount.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/templates/serviceaccount.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/templates/serviceaccount.yaml rename to 
lib/common/bootstrap/charts/qovery/qovery-agent/templates/serviceaccount.yaml diff --git a/lib/common/bootstrap/charts/qovery-agent/values.yaml b/lib/common/bootstrap/charts/qovery/qovery-agent/values.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-agent/values.yaml rename to lib/common/bootstrap/charts/qovery/qovery-agent/values.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/.helmignore b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/.helmignore similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/.helmignore rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/.helmignore diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/Chart.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/Chart.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/Chart.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/Chart.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/_helpers.tpl b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/_helpers.tpl similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/_helpers.tpl rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/_helpers.tpl diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/clusterrole.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/clusterrole.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/clusterrole.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/clusterrole.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/clusterrolebinding.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/clusterrolebinding.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/clusterrolebinding.yaml 
rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/clusterrolebinding.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/deployment.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/deployment.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/deployment.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/deployment.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/secret.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/secret.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/secret.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/secret.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/templates/serviceaccount.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/serviceaccount.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/templates/serviceaccount.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/templates/serviceaccount.yaml diff --git a/lib/common/bootstrap/charts/qovery-shell-agent/values.yaml b/lib/common/bootstrap/charts/qovery/qovery-shell-agent/values.yaml similarity index 100% rename from lib/common/bootstrap/charts/qovery-shell-agent/values.yaml rename to lib/common/bootstrap/charts/qovery/qovery-shell-agent/values.yaml diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index ac921c17..077f9254 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -993,7 +993,7 @@ datasources: let mut qovery_agent = CommonChart { chart_info: ChartInfo { name: "qovery-agent".to_string(), - path: chart_path("common/charts/qovery-agent"), + path: chart_path("common/charts/qovery/qovery-agent"), 
namespace: HelmChartNamespaces::Qovery, values: vec![ ChartSetValue { diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 4bac2c68..b1de34ff 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -814,7 +814,7 @@ datasources: let mut qovery_agent = CommonChart { chart_info: ChartInfo { name: "qovery-agent".to_string(), - path: chart_path("common/charts/qovery-agent"), + path: chart_path("common/charts/qovery/qovery-agent"), namespace: HelmChartNamespaces::Qovery, values: vec![ ChartSetValue { diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 5da089c7..cd128cb9 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -676,7 +676,7 @@ pub fn get_chart_for_shell_agent( let shell_agent = CommonChart { chart_info: ChartInfo { name: "shell-agent".to_string(), - path: chart_path("common/charts/qovery-shell-agent"), + path: chart_path("common/charts/qovery/qovery-shell-agent"), namespace: HelmChartNamespaces::Qovery, values: vec![ ChartSetValue { diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 0e5e6469..b4ee5bf2 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -687,7 +687,7 @@ datasources: let mut qovery_agent = CommonChart { chart_info: ChartInfo { name: "qovery-agent".to_string(), - path: chart_path("common/charts/qovery-agent"), + path: chart_path("common/charts/qovery/qovery-agent"), namespace: HelmChartNamespaces::Qovery, values: vec![ ChartSetValue { From 96b64674e97d089e8b5cd68a795b918082a855b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:59:09 +0200 Subject: [PATCH 029/122] Update charts.yaml --- .github/workflows/charts.yaml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml index c9f54f18..ffe93556 100644 --- a/.github/workflows/charts.yaml +++ b/.github/workflows/charts.yaml @@ -12,4 +12,4 @@ jobs: uses: stefanprodan/helm-gh-pages@master with: token: ${{ secrets.GITHUB_TOKEN }} - charts_dir: lib/common/bootstrap/charts/ + charts_dir: lib/common/bootstrap/charts/qovery From 10292c7170ae3e477d772eb794c29585c297ddc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 15:59:17 +0200 Subject: [PATCH 030/122] Update charts.yaml --- .github/workflows/charts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml index ffe93556..ff817bdc 100644 --- a/.github/workflows/charts.yaml +++ b/.github/workflows/charts.yaml @@ -12,4 +12,4 @@ jobs: uses: stefanprodan/helm-gh-pages@master with: token: ${{ secrets.GITHUB_TOKEN }} - charts_dir: lib/common/bootstrap/charts/qovery + charts_dir: lib/common/bootstrap/charts/qovery/ From 7fd7ee7c21caf7f1973ca4f5d242e117de19ff04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 12 Apr 2022 16:30:09 +0200 Subject: [PATCH 031/122] Update charts.yaml --- .github/workflows/charts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/charts.yaml b/.github/workflows/charts.yaml index ff817bdc..5ab80992 100644 --- a/.github/workflows/charts.yaml +++ b/.github/workflows/charts.yaml @@ -1,7 +1,7 @@ name: release-chart on: push: - branches: 'dev' + branches: ['dev', 'main'] jobs: release-chart: From e8f9dd97d616b3d5faba0ccc584df9c51566342e Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 12 Apr 2022 17:22:22 +0200 Subject: [PATCH 032/122] fix: avoid wild commanderror copy (#685) Those might lead to leaking sensitive data. 
--- .../aws/kubernetes/helm_charts.rs | 33 +++-- src/cloud_provider/aws/kubernetes/mod.rs | 7 +- src/cloud_provider/aws/kubernetes/node.rs | 8 +- src/cloud_provider/aws/kubernetes/roles.rs | 11 +- .../digitalocean/do_api_common.rs | 32 ++--- .../digitalocean/kubernetes/doks_api.rs | 24 ++-- .../digitalocean/kubernetes/helm_charts.rs | 14 +- .../digitalocean/kubernetes/mod.rs | 39 ++++-- .../digitalocean/kubernetes/node.rs | 8 +- .../digitalocean/network/load_balancer.rs | 12 +- .../digitalocean/network/vpc.rs | 12 +- src/cloud_provider/helm.rs | 13 +- src/cloud_provider/kubernetes.rs | 33 ++--- src/cloud_provider/qovery.rs | 10 +- .../scaleway/kubernetes/helm_charts.rs | 16 +-- src/cloud_provider/scaleway/kubernetes/mod.rs | 117 ++++++++-------- .../scaleway/kubernetes/node.rs | 8 +- src/cloud_provider/service.rs | 7 +- src/cmd/helm.rs | 81 ++++++++--- src/cmd/kubectl.rs | 41 +++--- src/cmd/terraform.rs | 50 +++++-- src/errors/io.rs | 8 +- src/errors/mod.rs | 127 +++++++++--------- src/events/mod.rs | 2 +- src/logger.rs | 1 + src/template.rs | 18 ++- test_utilities/src/utilities.rs | 24 ++-- 27 files changed, 407 insertions(+), 349 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 077f9254..9b8fc0cb 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -6,7 +6,7 @@ use crate::cloud_provider::helm::{ }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; use crate::cmd::kubectl::{kubectl_delete_crash_looping_pods, kubectl_exec_get_daemonset, kubectl_exec_with_output}; -use crate::errors::CommandError; +use crate::errors::{CommandError, ErrorMessageVerbosity}; use semver::Version; use serde::{Deserialize, Serialize}; use std::fs::File; @@ -66,10 +66,10 @@ pub fn aws_helm_charts( let content_file = match File::open(&qovery_terraform_config_file) { Ok(x) => 
x, Err(e) => { - let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + Some(envs.to_vec()), )); } }; @@ -79,10 +79,10 @@ pub fn aws_helm_charts( let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + Some(envs.to_vec()), )); } }; @@ -1359,16 +1359,15 @@ impl AwsVpcCniChart { _ => Ok(false), }, }, - Err(e) => { - let message_safe = format!( - "Error while getting daemonset info for chart {}, won't deploy CNI chart.", - &self.chart_info.name - ); - Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )) - } + Err(e) => Err(CommandError::new( + format!( + "Error while getting daemonset info for chart {}, won't deploy CNI chart. 
{}", + &self.chart_info.name, + e.message(ErrorMessageVerbosity::SafeOnly) + ), + e.message_raw(), + e.env_vars(), + )), } } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 3104f6f1..7e6b3077 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -890,8 +890,9 @@ impl EKS { match metric.value.parse::() { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { - let safe_message = "Error while looking at the API metric value"; - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics( + event_details.clone(), + CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); } _ => {} } @@ -1262,7 +1263,7 @@ impl EKS { )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, - CommandError::new(msg, None), + CommandError::new("Error while trying to perform Terraform destroy".to_string(), Some(msg), None), )), } } diff --git a/src/cloud_provider/aws/kubernetes/node.rs b/src/cloud_provider/aws/kubernetes/node.rs index 2a88784f..2a46a27d 100644 --- a/src/cloud_provider/aws/kubernetes/node.rs +++ b/src/cloud_provider/aws/kubernetes/node.rs @@ -70,10 +70,10 @@ impl FromStr for AwsInstancesType { "t3a.medium" => Ok(AwsInstancesType::T3aMedium), "t3a.large" => Ok(AwsInstancesType::T3aLarge), "t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge), - _ => { - let message = format!("`{}` instance type is not supported", s); - Err(CommandError::new(message.clone(), Some(message))) - } + _ => Err(CommandError::new_from_safe_message(format!( + "`{}` instance type is not supported", + s + ))), } } } diff --git 
a/src/cloud_provider/aws/kubernetes/roles.rs b/src/cloud_provider/aws/kubernetes/roles.rs index e4593425..8f15f4df 100644 --- a/src/cloud_provider/aws/kubernetes/roles.rs +++ b/src/cloud_provider/aws/kubernetes/roles.rs @@ -43,8 +43,9 @@ impl Role { match role { Ok(_) => Ok(true), Err(e) => Err(CommandError::new( - format!("Unable to know if {} exist on AWS account: {:?}", &self.role_name, e), - Some(format!("Unable to know if {} exist on AWS account.", &self.role_name,)), + format!("Unable to know if {} exist on AWS account.", &self.role_name,), + Some(e.to_string()), + None, )), } } @@ -78,10 +79,10 @@ impl Role { return match created { Ok(_) => Ok(true), Err(e) => { - let safe_message = format!("Unable to know if `{}` exist on AWS Account", &self.role_name); return Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message), + format!("Unable to know if `{}` exist on AWS Account", &self.role_name), + Some(e.to_string()), + None, )); } }; diff --git a/src/cloud_provider/digitalocean/do_api_common.rs b/src/cloud_provider/digitalocean/do_api_common.rs index 69471800..078923ce 100644 --- a/src/cloud_provider/digitalocean/do_api_common.rs +++ b/src/cloud_provider/digitalocean/do_api_common.rs @@ -33,28 +33,22 @@ pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Res let res = reqwest::blocking::Client::new().get(url_api).headers(headers).send(); match res { - Ok(response) => { - match response.status() { - StatusCode::OK => Ok(response.text().expect("Cannot get response text")), - StatusCode::UNAUTHORIZED => { - let message_safe = format!( + Ok(response) => match response.status() { + StatusCode::OK => Ok(response.text().expect("Cannot get response text")), + StatusCode::UNAUTHORIZED => { + return Err(CommandError::new( + format!( "Could not get {} information, ensure your DigitalOcean token is valid.", api_type - ); - return Err(CommandError::new( - format!("{}, response: {:?}", message_safe, response), - 
Some(message_safe), - )); - } - _ => { - let message_safe = format!("Unknown status code received from Digital Ocean Kubernetes API while retrieving {} information.", api_type); - return Err(CommandError::new( - format!("{}, response: {:?}", message_safe, response), - Some(message_safe), - )); - } + ), + Some(format!("response: {:?}", response)), + None, + )); } - } + _ => { + return Err(CommandError::new(format!("Unknown status code received from Digital Ocean Kubernetes API while retrieving {} information.", api_type), Some(format!("response: {:?}", response)), None)); + } + }, Err(_) => Err(CommandError::new_from_safe_message(format!( "Unable to get a response from Digital Ocean {} API", api_type diff --git a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs index 67f3afad..5fd06092 100644 --- a/src/cloud_provider/digitalocean/kubernetes/doks_api.rs +++ b/src/cloud_provider/digitalocean/kubernetes/doks_api.rs @@ -24,13 +24,11 @@ pub fn get_doks_info_from_name( Ok(cluster_info) } - Err(e) => { - let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; - return Err(CommandError::new( - format!("{}, error: {}", safe_message, e), - Some(safe_message.to_string()), - )); - } + Err(e) => Err(CommandError::new( + "Error while trying to deserialize json received from Digital Ocean DOKS API".to_string(), + Some(e.to_string()), + None, + )), } } @@ -48,13 +46,11 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result Ok(options.options.versions), - Err(e) => { - let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API"; - return Err(CommandError::new( - format!("{}, error: {}", safe_message, e), - Some(safe_message.to_string()), - )); - } + Err(e) => Err(CommandError::new( + "Error while trying to deserialize json received from Digital Ocean DOKS API".to_string(), + Some(e.to_string()), + None, + )), } } diff --git 
a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index b1de34ff..19b09381 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -120,10 +120,10 @@ pub fn do_helm_charts( let content_file = match File::open(&qovery_terraform_config_file) { Ok(x) => x, Err(e) => { - let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + None, )); } }; @@ -133,10 +133,10 @@ pub fn do_helm_charts( let qovery_terraform_config: DigitalOceanQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + None, )); } }; @@ -740,7 +740,7 @@ datasources: }, ChartSetValue { key: "environmentVariables.DO_VOLUME_TIMEOUT".to_string(), - value: 168.to_string(), + value: 168i32.to_string(), }, ChartSetValue { key: "environmentVariables.PLECO_IDENTIFIER".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 58fc9586..4afe105d 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -690,11 +690,13 @@ impl DOKS { ) { Ok(x) => x.to_string(), Err(e) => { - let safe_message = "Load balancer 
IP wasn't able to be retrieved from UUID on DigitalOcean API and it's required for TLS setup"; return Err(EngineError::new_k8s_loadbalancer_configuration_issue( event_details.clone(), - CommandError::new(e.message(ErrorMessageVerbosity::FullDetails), Some(safe_message.to_string())), - )); + CommandError::new( + format!("Load balancer IP wasn't able to be retrieved from UUID on DigitalOcean API and it's required for TLS setup, {}", e.message_safe()), + e.message_raw(), + e.env_vars(), + ))); } }; @@ -1079,7 +1081,7 @@ impl DOKS { )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, - CommandError::new(msg, None), + CommandError::new("Error while performing Terraform destroy.".to_string(), Some(msg), None), )), } } @@ -1193,7 +1195,11 @@ impl Kubernetes for DOKS { Ok(file) => Ok((StringPath::from(&local_kubeconfig_generated), file)), Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), + CommandError::new( + "Error while trying to open Kubeconfig file.".to_string(), + Some(e.to_string()), + None, + ), )), }, None => { @@ -1221,7 +1227,11 @@ impl Kubernetes for DOKS { .map_err(|err| { EngineError::new_cannot_retrieve_cluster_config_file( event_details.clone(), - CommandError::new(err.to_string(), Some(err.to_string())), + CommandError::new( + "Error while trying to create workspace directory.".to_string(), + Some(err.to_string()), + None, + ), ) }) .expect("Unable to create directory"); @@ -1245,12 +1255,20 @@ impl Kubernetes for DOKS { } Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( event_details.clone(), - CommandError::new(e.to_string(), Some(e.to_string())), + CommandError::new( + "Error while trying to write Kubeconfig file content".to_string(), + Some(e.to_string()), + None, + ), )), }, Err(e) => Err(EngineError::new_cannot_create_file( event_details.clone(), - 
CommandError::new(e.to_string(), Some(e.to_string())), + CommandError::new( + "Error while trying to create Kubeconfig file.".to_string(), + Some(e.to_string()), + None, + ), )), } } @@ -1259,10 +1277,7 @@ impl Kubernetes for DOKS { match result { Err(e) => Err(EngineError::new_cannot_retrieve_cluster_config_file( event_details, - CommandError::new( - e.message(ErrorMessageVerbosity::FullDetails), - Some(e.message(ErrorMessageVerbosity::SafeOnly)), - ), + e.underlying_error().unwrap_or_default(), )), Ok((file_path, file)) => Ok((file_path, file)), } diff --git a/src/cloud_provider/digitalocean/kubernetes/node.rs b/src/cloud_provider/digitalocean/kubernetes/node.rs index 549fa3eb..62bc4da2 100644 --- a/src/cloud_provider/digitalocean/kubernetes/node.rs +++ b/src/cloud_provider/digitalocean/kubernetes/node.rs @@ -111,10 +111,10 @@ impl FromStr for DoInstancesType { "s-20vcpu-96gb" => Ok(DoInstancesType::S20vcpu96gb), "s-24vcpu-128gb" => Ok(DoInstancesType::S24vcpu128gb), "s-32vcpu-192gb" => Ok(DoInstancesType::S32vcpu192gb), - _ => { - let message = format!("`{}` instance type is not supported", s); - Err(CommandError::new(message.clone(), Some(message))) - } + _ => Err(CommandError::new_from_safe_message(format!( + "`{}` instance type is not supported", + s + ))), } } } diff --git a/src/cloud_provider/digitalocean/network/load_balancer.rs b/src/cloud_provider/digitalocean/network/load_balancer.rs index ace9ca02..993391f2 100644 --- a/src/cloud_provider/digitalocean/network/load_balancer.rs +++ b/src/cloud_provider/digitalocean/network/load_balancer.rs @@ -17,11 +17,12 @@ pub fn get_ip_from_do_load_balancer_api_output(json_content: &str) -> Result match Ipv4Addr::from_str(&lb.load_balancer.ip) { Ok(ip) => Ok(ip), Err(e) => Err(CommandError::new( - e.to_string(), - Some(format!( + format!( "Info returned from DO API is not a valid IP, received '{:?}' instead.", &lb.load_balancer.ip, - )), + ), + Some(e.to_string()), + None, )), }, Err(_) => 
Err(CommandError::new_from_safe_message( @@ -42,10 +43,9 @@ pub fn do_get_load_balancer_ip(token: &str, load_balancer_id: &str) -> Result Err(CommandError::new( - format!("{:?}", response), - Some( "Unknown status code received from Digital Ocean Kubernetes API while retrieving load balancer information.".to_string(), - ), + Some(format!("response: {:?}", response)), + None, )), }, Err(_) => { diff --git a/src/cloud_provider/digitalocean/network/vpc.rs b/src/cloud_provider/digitalocean/network/vpc.rs index 76225984..b720fd7d 100644 --- a/src/cloud_provider/digitalocean/network/vpc.rs +++ b/src/cloud_provider/digitalocean/network/vpc.rs @@ -126,13 +126,11 @@ fn do_get_vpcs_from_api_output(json_content: &str) -> Result, CommandEr match res_vpcs { Ok(vpcs) => Ok(vpcs.vpcs), - Err(e) => { - let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API"; - Err(CommandError::new( - format!("{}, error: {}", message_safe, e), - Some(message_safe.to_string()), - )) - } + Err(e) => Err(CommandError::new( + "Error while trying to deserialize json received from Digital Ocean VPC API".to_string(), + Some(e.to_string()), + None, + )), } } diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index cd128cb9..fe22b7e8 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -155,11 +155,10 @@ pub trait HelmChart: Send { let chart = self.get_chart_info(); for file in chart.values_files.iter() { if let Err(e) = fs::metadata(file) { - let safe_message = - format!("Can't access helm chart override file `{}` for chart `{}`", file, chart.name,); return Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message), + format!("Can't access helm chart override file `{}` for chart `{}`", file, chart.name,), + Some(e.to_string()), + None, )); } } @@ -304,10 +303,10 @@ fn deploy_parallel_charts( } } Err(e) => { - let safe_message = "Thread panicked during parallel charts deployments."; let error = 
Err(CommandError::new( - format!("{}, error: {:?}", safe_message, e), - Some(safe_message.to_string()), + "Thread panicked during parallel charts deployments.".to_string(), + Some(format!("{:?}", e)), + None, )); errors.push(error); } diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index cd89c9a1..5b05ee65 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -181,13 +181,7 @@ pub trait Kubernetes: Listen { ) { Ok(k) => k, Err(err) => { - let error = EngineError::new_cannot_get_cluster_nodes( - self.get_event_details(stage), - CommandError::new( - err.message(ErrorMessageVerbosity::FullDetails), - Some("Error while trying to get cluster nodes.".to_string()), - ), - ); + let error = EngineError::new_cannot_get_cluster_nodes(self.get_event_details(stage), err); self.logger().log(EngineEvent::Error(error.clone(), None)); @@ -267,12 +261,7 @@ pub trait Kubernetes: Listen { { let kubeconfig = match self.get_kubeconfig_file() { Ok((path, _)) => path, - Err(e) => { - return Err(CommandError::new( - e.message(ErrorMessageVerbosity::FullDetails), - Some(e.message(ErrorMessageVerbosity::SafeOnly)), - )) - } + Err(e) => return Err(e.underlying_error().unwrap_or_default()), }; send_progress_on_long_task(self, Action::Create, || { @@ -313,7 +302,10 @@ pub trait Kubernetes: Listen { format!("bootstrap/{}", self.id()), ) .map_err(|err| { - EngineError::new_cannot_get_workspace_directory(event_details, CommandError::new(err.to_string(), None)) + EngineError::new_cannot_get_workspace_directory( + event_details, + CommandError::new("Error creating workspace directory.".to_string(), Some(err.to_string()), None), + ) }) } @@ -1404,11 +1396,9 @@ pub fn convert_k8s_cpu_value_to_f32(value: String) -> Result Ok(n * 0.001) // return in milli cpu the value } Err(e) => Err(CommandError::new( - e.to_string(), - Some(format!( - "Error while trying to parse `{}` to float 32.", - value_number_string.as_str() - )), + 
format!("Error while trying to parse `{}` to float 32.", value_number_string.as_str()), + Some(e.to_string()), + None, )), }; } @@ -1416,8 +1406,9 @@ pub fn convert_k8s_cpu_value_to_f32(value: String) -> Result match value.parse::() { Ok(n) => Ok(n), Err(e) => Err(CommandError::new( - e.to_string(), - Some(format!("Error while trying to parse `{}` to float 32.", value.as_str())), + format!("Error while trying to parse `{}` to float 32.", value.as_str()), + Some(e.to_string()), + None, )), } } diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index d9a27611..92943863 100644 --- a/src/cloud_provider/qovery.rs +++ b/src/cloud_provider/qovery.rs @@ -62,14 +62,8 @@ pub fn get_qovery_app_version( match reqwest::blocking::Client::new().get(&url).headers(headers).send() { Ok(x) => match x.json::() { Ok(qa) => Ok(qa), - Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )), + Err(e) => Err(CommandError::new(message_safe, Some(e.to_string()), None)), }, - Err(e) => Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), - )), + Err(e) => Err(CommandError::new(message_safe, Some(e.to_string()), None)), } } diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index b4ee5bf2..9bea4c13 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -108,17 +108,17 @@ pub fn scw_helm_charts( chart_config_prerequisites: &ChartsConfigPrerequisites, chart_prefix_path: Option<&str>, _kubernetes_config: &Path, - _envs: &[(String, String)], + envs: &[(String, String)], ) -> Result>>, CommandError> { info!("preparing chart configuration to be deployed"); let content_file = match File::open(&qovery_terraform_config_file) { Ok(x) => x, Err(e) => { - let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered 
by Terraform. Are you running it in dry run mode?"; return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe.to_string()), + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + Some(envs.to_vec()), )); } }; @@ -128,10 +128,10 @@ pub fn scw_helm_charts( let qovery_terraform_config: ScalewayQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { - let message_safe = format!("Error while parsing terraform config file {}", qovery_terraform_config_file); return Err(CommandError::new( - format!("{}, error: {:?}", message_safe, e), - Some(message_safe), + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + Some(envs.to_vec()), )); } }; @@ -655,7 +655,7 @@ datasources: }, ChartSetValue { key: "environmentVariables.SCW_VOLUME_TIMEOUT".to_string(), - value: 24.to_string(), + value: 24i32.to_string(), }, ChartSetValue { key: "environmentVariables.LOG_LEVEL".to_string(), diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index fc8b6027..cb7fecd1 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -238,10 +238,13 @@ impl Kapsule { )) { Ok(x) => x, Err(e) => { - let msg = format!("wasn't able to retrieve SCW cluster information from the API. 
{:?}", e); return Err(EngineError::new_cannot_get_cluster_error( event_details, - CommandError::new(msg.clone(), Some(msg)), + CommandError::new( + "Error, wasn't able to retrieve SCW cluster information from the API.".to_string(), + Some(e.to_string()), + None, + ), )); } }; @@ -251,13 +254,12 @@ impl Kapsule { if cluster_info_content.is_empty() { return Ok(None); } else if cluster_info_content.len() != 1_usize { - let msg = format!( - "too many clusters found with this name, where 1 was expected. {:?}", - &cluster_info_content.len() - ); return Err(EngineError::new_multiple_cluster_found_expected_one_error( event_details, - CommandError::new(msg.clone(), Some(msg)), + CommandError::new_from_safe_message(format!( + "Error, too many clusters found ({}) with this name, where 1 was expected.", + &cluster_info_content.len() + )), )); } @@ -290,34 +292,35 @@ impl Kapsule { )) { Ok(x) => x, Err(e) => { - let msg = format!("error while trying to get SCW pool info from cluster {}", &cluster_id); - let msg_with_error = format!("{}. 
{:?}", msg, e); return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( - msg_with_error, - Some(msg), + format!("Error while trying to get SCW pool info from cluster {}.", &cluster_id), + Some(e.to_string()), + None, ))); } }; // ensure pool are present if pools.pools.is_none() { - let msg = format!( - "No SCW pool found from the SCW API for cluster {}/{}", - &cluster_id, - &cluster_info.name.unwrap_or_else(|| "unknown cluster".to_string()) - ); - return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new(msg.clone(), Some(msg)))); + return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new_from_safe_message( + format!( + "Error, no SCW pool found from the SCW API for cluster {}/{}", + &cluster_id, + &cluster_info.name.unwrap_or_else(|| "unknown cluster".to_string()) + ), + ))); } // create sanitized nodegroup pools let mut nodegroup_pool: Vec = Vec::with_capacity(pools.total_count.unwrap_or(0 as f32) as usize); for ng in pools.pools.unwrap() { if ng.id.is_none() { - let msg = format!("error while trying to validate SCW pool ID from cluster {}", &cluster_id); - return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new( - msg.clone(), - Some(msg), - ))); + return Err(ScwNodeGroupErrors::NodeGroupValidationError( + CommandError::new_from_safe_message(format!( + "Error while trying to validate SCW pool ID from cluster {}", + &cluster_id + )), + )); } let ng_sanitized = self.get_node_group_info(ng.id.unwrap().as_str())?; nodegroup_pool.push(ng_sanitized) @@ -327,35 +330,38 @@ impl Kapsule { } fn get_node_group_info(&self, pool_id: &str) -> Result { - let pool = match block_on(scaleway_api_rs::apis::pools_api::get_pool( - &self.get_configuration(), - self.region().as_str(), - pool_id, - )) { - Ok(x) => x, - Err(e) => { - return Err(match e { + let pool = + match block_on(scaleway_api_rs::apis::pools_api::get_pool( + &self.get_configuration(), + self.region().as_str(), + pool_id, + )) { + Ok(x) => x, + Err(e) => return 
Err(match e { Error::ResponseError(x) => { let msg_with_error = format!("Error code while getting node group: {}, API message: {} ", x.status, x.content); match x.status { StatusCode::NOT_FOUND => ScwNodeGroupErrors::NoNodePoolFound(CommandError::new( - msg_with_error, - Some("No node pool found".to_string()), + "No node pool found".to_string(), + Some(msg_with_error), + None, )), _ => ScwNodeGroupErrors::CloudProviderApiError(CommandError::new( - msg_with_error, - Some("Scaleway API error while trying to get node group".to_string()), + "Scaleway API error while trying to get node group".to_string(), + Some(msg_with_error), + None, )), } } - _ => { - let msg = "This Scaleway API error is not supported in the engine, please add it to better support it".to_string(); - ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new(msg.clone(), Some(msg))) - } - }) - } - }; + _ => ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new( + "This Scaleway API error is not supported in the engine, please add it to better support it" + .to_string(), + Some(e.to_string()), + None, + )), + }), + }; // ensure there is no missing info if let Err(e) = self.check_missing_nodegroup_info(&pool.name, "name") { @@ -740,10 +746,9 @@ impl Kapsule { let cluster_info = self.get_scw_cluster_info()?; if cluster_info.is_none() { - let msg = "no cluster found from the Scaleway API".to_string(); return Err(EngineError::new_no_cluster_found_error( event_details, - CommandError::new(msg.clone(), Some(msg)), + CommandError::new_from_safe_message("Error, no cluster found from the Scaleway API".to_string()), )); } @@ -762,27 +767,29 @@ impl Kapsule { ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe( - "cluster do not exists, no node groups can be retrieved for upgrade check".to_string(), + "Cluster do not exists, no node groups can be retrieved for upgrade check.".to_string(), ), )), 
ScwNodeGroupErrors::MultipleClusterFound => { - let msg = "multiple clusters found, can't match the correct node groups".to_string(); return Err(EngineError::new_multiple_cluster_found_expected_one_error( event_details, - CommandError::new(msg.clone(), Some(msg)), + CommandError::new_from_safe_message( + "Error, multiple clusters found, can't match the correct node groups.".to_string(), + ), )); } ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log(EngineEvent::Warning( event_details.clone(), EventMessage::new_from_safe( - "cluster exists, but no node groups found for upgrade check".to_string(), + "Cluster exists, but no node groups found for upgrade check.".to_string(), ), )), ScwNodeGroupErrors::MissingNodePoolInfo => { - let msg = "Error with Scaleway API while trying to retrieve node pool info".to_string(); return Err(EngineError::new_missing_api_info_from_cloud_provider_error( event_details, - Some(CommandError::new_from_safe_message(msg)), + Some(CommandError::new_from_safe_message( + "Error with Scaleway API while trying to retrieve node pool info".to_string(), + )), )); } ScwNodeGroupErrors::NodeGroupValidationError(c) => { @@ -800,7 +807,7 @@ impl Kapsule { self.logger.log(EngineEvent::Info( event_details.clone(), EventMessage::new_from_safe( - "ensuring all groups nodes are in ready state from the Scaleway API".to_string(), + "Ensuring all groups nodes are in ready state from the Scaleway API".to_string(), ), )); @@ -893,7 +900,7 @@ impl Kapsule { Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_k8s_node_not_ready( event_details, - CommandError::new(msg, Some("Waiting for too long worker nodes to be ready".to_string())), + CommandError::new("Waiting for too long worker nodes to be ready".to_string(), Some(msg), None), )) } } @@ -1122,8 +1129,7 @@ impl Kapsule { match metric.value.parse::() { Ok(job_count) if job_count > 0 => current_engine_jobs += 1, Err(e) => { - let safe_message = "Error while looking at the API metric value"; - 
return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(format!("{}, error: {}", safe_message, e), Some(safe_message.to_string())))); + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); } _ => {} } @@ -1136,9 +1142,8 @@ impl Kapsule { } } Err(e) => { - let safe_message = format!("Error while looking at the API metric value {}", metric_name); OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), CommandError::new(e.message(ErrorMessageVerbosity::FullDetails), Some(safe_message)))) + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), e)) } }; }); @@ -1481,7 +1486,7 @@ impl Kapsule { )), Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( event_details, - CommandError::new(msg, None), + CommandError::new("Error while performing Terraform destroy.".to_string(), Some(msg), None), )), } } diff --git a/src/cloud_provider/scaleway/kubernetes/node.rs b/src/cloud_provider/scaleway/kubernetes/node.rs index 8cd5419c..d1725c0f 100644 --- a/src/cloud_provider/scaleway/kubernetes/node.rs +++ b/src/cloud_provider/scaleway/kubernetes/node.rs @@ -80,10 +80,10 @@ impl FromStr for ScwInstancesType { "dev1-l" => Ok(ScwInstancesType::Dev1L), "dev1-xl" => Ok(ScwInstancesType::Dev1Xl), "render-s" => Ok(ScwInstancesType::RenderS), - _ => { - let message = format!("`{}` instance type is not supported", s); - Err(CommandError::new(message.clone(), Some(message))) - } + _ => Err(CommandError::new_from_safe_message(format!( + "`{}` instance type is not supported", + s + ))), } } } diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index aed0b930..2593d5d9 100644 --- a/src/cloud_provider/service.rs +++ 
b/src/cloud_provider/service.rs @@ -17,7 +17,7 @@ use crate::cmd::helm; use crate::cmd::kubectl::ScalingKind::Statefulset; use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replicas_by_selector, ScalingKind}; use crate::cmd::structs::LabelsContent; -use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; +use crate::errors::{CommandError, EngineError}; use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, Stage, ToTransmitter}; use crate::io_models::ProgressLevel::Info; use crate::io_models::{ @@ -1056,10 +1056,7 @@ where Err(EngineError::new_k8s_service_issue( event_details, - CommandError::new( - err.message(ErrorMessageVerbosity::FullDetails), - Some("Error with Kubernetes service".to_string()), - ), + err.underlying_error().unwrap_or_default(), )) } _ => { diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index e857a916..daf0f0f2 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -138,7 +138,11 @@ impl Helm { Err(_) if stderr.contains("release: not found") => Err(ReleaseDoesNotExist(chart.name.clone())), Err(err) => { stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); - let error = CommandError::new(stderr, err.message_safe()); + let error = CommandError::new( + err.message_safe(), + Some(stderr), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ); Err(CmdError(chart.name.clone(), STATUS, error)) } Ok(_) => { @@ -175,7 +179,11 @@ impl Helm { match helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); - let error = CommandError::new(stderr, err.message_safe()); + let error = CommandError::new( + err.message_safe(), + Some(stderr), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ); Err(CmdError(chart.name.clone(), ROLLBACK, error)) } Ok(_) => Ok(()), @@ -208,7 +216,11 @@ impl Helm { match 
helm_exec_with_output(&args, &self.get_all_envs(envs), &mut |_| {}, &mut |line| stderr.push_str(&line)) { Err(err) => { stderr.push_str(&err.message(ErrorMessageVerbosity::FullDetails)); - let error = CommandError::new(stderr, err.message_safe()); + let error = CommandError::new( + err.message_safe(), + Some(stderr), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ); Err(CmdError(chart.name.clone(), UNINSTALL, error)) } Ok(_) => Ok(()), @@ -279,14 +291,19 @@ impl Helm { Ok(helms_charts) } - Err(e) => { - let message_safe = "Error while deserializing all helms names"; - Err(HelmError::CmdError( - "none".to_string(), - LIST, - CommandError::new(format!("{}, error: {}", message_safe, e), Some(message_safe.to_string())), - )) - } + Err(e) => Err(HelmError::CmdError( + "none".to_string(), + LIST, + CommandError::new( + "Error while deserializing all helms names".to_string(), + Some(e.to_string()), + Some( + envs.iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect::>(), + ), + ), + )), } } @@ -338,10 +355,14 @@ impl Helm { // no need to validate yaml as it will be done by helm if let Err(e) = file_create() { - let safe_message = format!("Error while writing yaml content to file `{}`", &file_path); let cmd_err = CommandError::new( - format!("{}\nContent\n{}\nError: {}", safe_message, value_file.yaml_content, e), - Some(safe_message), + format!("Error while writing yaml content to file `{}`", &file_path), + Some(format!("Content\n{}\nError: {}", value_file.yaml_content, e)), + Some( + envs.iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect::>(), + ), ); return Err(HelmError::CmdError(chart.name.clone(), HelmCommand::UPGRADE, cmd_err)); }; @@ -375,7 +396,11 @@ impl Helm { Err(CmdError( chart.name.clone(), HelmCommand::DIFF, - CommandError::new(stderr_msg.clone(), Some(stderr_msg)), + CommandError::new( + "Helm error".to_string(), + Some(stderr_msg), + Some(envs.iter().map(|(k, v)| (k.to_string(), 
v.to_string())).collect()), + ), )) } } @@ -445,10 +470,14 @@ impl Helm { // no need to validate yaml as it will be done by helm if let Err(e) = file_create() { - let safe_message = format!("Error while writing yaml content to file `{}`", &file_path); let cmd_err = CommandError::new( - format!("{}\nContent\n{}\nError: {}", safe_message, value_file.yaml_content, e), - Some(safe_message), + format!("Error while writing yaml content to file `{}`", &file_path), + Some(format!("Content\n{}\nError: {}", value_file.yaml_content, e)), + Some( + envs.iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect::>(), + ), ); return Err(HelmError::CmdError(chart.name.clone(), HelmCommand::UPGRADE, cmd_err)); }; @@ -494,7 +523,11 @@ impl Helm { CmdError( chart.name.clone(), HelmCommand::UPGRADE, - CommandError::new(stderr_msg.clone(), Some(stderr_msg)), + CommandError::new( + "Helm error".to_string(), + Some(stderr_msg), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ), ) }; @@ -541,7 +574,15 @@ where // It means that the command successfully ran, but it didn't terminate as expected let mut cmd = QoveryCommand::new("helm", args, envs); match cmd.exec_with_output(stdout_output, stderr_output) { - Err(err) => Err(CommandError::new(format!("{:?}", err), None)), + Err(err) => Err(CommandError::new( + "Error while executing Helm command.".to_string(), + Some(format!("{:?}", err)), + Some( + envs.iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect::>(), + ), + )), _ => Ok(()), } } diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 7318f75e..339a8261 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -1167,37 +1167,36 @@ where P: AsRef, T: DeserializeOwned, { - let mut _envs = Vec::with_capacity(envs.len() + 1); - _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); - _envs.extend(envs); + let mut extended_envs = Vec::with_capacity(envs.len() + 1); + extended_envs.push((KUBECONFIG, 
kubernetes_config.as_ref().to_str().unwrap())); + extended_envs.extend(envs); let mut output_vec: Vec = Vec::with_capacity(50); - let _ = kubectl_exec_with_output(args.clone(), _envs.clone(), &mut |line| output_vec.push(line), &mut |line| { - error!("{}", line) - })?; + let mut err_vec = Vec::new(); + let _ = kubectl_exec_with_output( + args.clone(), + extended_envs.clone(), + &mut |line| output_vec.push(line), + &mut |line| { + err_vec.push(line.to_string()); + error!("{}", line) + }, + )?; let output_string: String = output_vec.join(""); let result = match serde_json::from_str::(output_string.as_str()) { Ok(x) => x, Err(err) => { - let args_string = args.join(" "); - let mut env_vars_in_vec = Vec::new(); - let _ = _envs.into_iter().map(|x| { - env_vars_in_vec.push(x.0.to_string()); - env_vars_in_vec.push(x.1.to_string()); - }); - let environment_variables = env_vars_in_vec.join(" "); return Err(CommandError::new( - format!( - "JSON parsing error on {:?} on command: {} kubectl {}, output: {}. 
{:?}", - std::any::type_name::(), - environment_variables, - args_string, - output_string, - err + "JSON parsing error on kubectl command.".to_string(), + Some(err.to_string()), + Some( + extended_envs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect::>(), ), - Some("JSON parsing error on kubectl command.".to_string()), )); } }; diff --git a/src/cmd/terraform.rs b/src/cmd/terraform.rs index d20ef391..82b33a9d 100644 --- a/src/cmd/terraform.rs +++ b/src/cmd/terraform.rs @@ -31,10 +31,8 @@ fn manage_common_issues(terraform_provider_lock: &str, err: &CommandError) -> Re Ok(_) => Ok(()), Err(e) => Err(CommandError::new( format!("Wasn't able to delete terraform lock file {}", &terraform_provider_lock), - Some(format!( - "Wasn't able to delete terraform lock file {}, error: {:?}", - &terraform_provider_lock, e - )), + Some(e.to_string()), + None, )), }; } else if err @@ -78,7 +76,11 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> { match result { Ok(_) => Ok(()), Err(Operation { error, .. }) => Err(error), - Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), + Err(retry::Error::Internal(e)) => Err(CommandError::new( + "Error while performing Terraform validate.".to_string(), + Some(e), + None, + )), } } @@ -103,7 +105,11 @@ pub fn terraform_init_validate_plan_apply(root_dir: &str, dry_run: bool) -> Resu return match result { Ok(_) => Ok(()), Err(Operation { error, .. }) => Err(error), - Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), + Err(retry::Error::Internal(e)) => Err(CommandError::new( + "Error while performing Terraform validate.".to_string(), + Some(e), + None, + )), }; } @@ -135,7 +141,11 @@ pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy: match result { Ok(_) => Ok(()), Err(Operation { error, .. 
}) => Err(error), - Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), + Err(retry::Error::Internal(e)) => Err(CommandError::new( + "Error while performing Terraform destroy".to_string(), + Some(e), + None, + )), } } @@ -159,7 +169,11 @@ fn terraform_plan_apply(root_dir: &str) -> Result<(), CommandError> { match result { Ok(_) => Ok(()), Err(Operation { error, .. }) => Err(error), - Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), + Err(retry::Error::Internal(e)) => Err(CommandError::new( + "Error while performing Terraform plan and apply.".to_string(), + Some(e), + None, + )), } } @@ -183,7 +197,11 @@ pub fn terraform_init_validate_state_list(root_dir: &str) -> Result, match result { Ok(output) => Ok(output), Err(Operation { error, .. }) => Err(error), - Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)), + Err(retry::Error::Internal(e)) => Err(CommandError::new( + "Error while performing Terraform state list.".to_string(), + Some(e), + None, + )), } } @@ -199,7 +217,8 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co let mut stdout = Vec::new(); let mut stderr = Vec::new(); - let mut cmd = QoveryCommand::new("terraform", &args, &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())]); + let envs = &[(TF_PLUGIN_CACHE_DIR, tf_plugin_cache_dir_value.as_str())]; + let mut cmd = QoveryCommand::new("terraform", &args, envs); cmd.set_current_dir(root_dir); let result = cmd.exec_with_output( @@ -213,11 +232,18 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result, Co }, ); - stdout.extend(stderr); + stdout.extend(stderr.clone()); match result { Ok(_) => Ok(stdout), - Err(_) => Err(CommandError::new(stdout.join("\n"), None)), + Err(_) => Err(CommandError::new_from_command_line( + "Error while performing Terraform command.".to_string(), + "terraform".to_string(), + args.iter().map(|e| e.to_string()).collect(), + envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect(), + 
Some(stdout.join(" ")), + Some(stderr.join(" ")), + )), } } diff --git a/src/errors/io.rs b/src/errors/io.rs index d743824a..fa895a2c 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -12,8 +12,8 @@ pub struct CommandError { impl From for CommandError { fn from(error: errors::CommandError) -> Self { CommandError { - message: error.message_safe.unwrap_or_default(), - full_details: error.full_details, + message: error.message_safe, + full_details: error.full_details.unwrap_or_default(), } } } @@ -235,7 +235,7 @@ pub struct EngineError { event_details: EventDetails, qovery_log_message: String, user_log_message: String, - message: Option, + underlying_error: Option, link: Option, hint_message: Option, } @@ -247,7 +247,7 @@ impl From for EngineError { event_details: EventDetails::from(error.event_details), qovery_log_message: error.qovery_log_message, user_log_message: error.user_log_message, - message: error.message.map(CommandError::from), + underlying_error: error.underlying_error.map(CommandError::from), link: error.link.map(|url| url.to_string()), hint_message: error.hint_message, } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 91f9ff79..9a0a88b7 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -30,9 +30,9 @@ pub enum ErrorMessageVerbosity { #[derivative(Debug)] pub struct CommandError { /// full_details: full error message, can contains unsafe text such as passwords and tokens. - full_details: String, + full_details: Option, /// message_safe: error message omitting displaying any protected data such as passwords and tokens. - message_safe: Option, + message_safe: String, /// env_vars: environments variables including touchy data such as secret keys. /// env_vars field is ignored from any wild Debug printing because of it touchy data it carries. #[derivative(Debug = "ignore")] @@ -41,13 +41,13 @@ pub struct CommandError { impl CommandError { /// Returns CommandError message_raw. May contains unsafe text such as passwords and tokens. 
- pub fn message_raw(&self) -> String { - self.full_details.to_string() + pub fn message_raw(&self) -> Option { + self.full_details.clone() } /// Returns CommandError message_safe omitting all unsafe text such as passwords and tokens. - pub fn message_safe(&self) -> Option { - self.message_safe.clone() + pub fn message_safe(&self) -> String { + self.message_safe.to_string() } /// Returns CommandError env_vars. @@ -58,23 +58,20 @@ impl CommandError { /// Returns error message based on verbosity. pub fn message(&self, message_verbosity: ErrorMessageVerbosity) -> String { match message_verbosity { - ErrorMessageVerbosity::SafeOnly => match &self.message_safe { - None => "".to_string(), - Some(msg) => msg.to_string(), + ErrorMessageVerbosity::SafeOnly => self.message_safe.to_string(), + ErrorMessageVerbosity::FullDetailsWithoutEnvVars => match &self.full_details { + None => self.message(ErrorMessageVerbosity::SafeOnly), + Some(full_details) => format!("{} / Full details: {}", self.message_safe, full_details), }, - ErrorMessageVerbosity::FullDetailsWithoutEnvVars => match &self.message_safe { - None => self.full_details.to_string(), - Some(safe) => format!("{} / Full details: {}", safe, self.full_details), - }, - ErrorMessageVerbosity::FullDetails => match &self.message_safe { - None => self.full_details.to_string(), - Some(safe) => match &self.env_vars { - None => format!("{} / Full details: {}", safe, self.full_details), + ErrorMessageVerbosity::FullDetails => match &self.full_details { + None => self.message(ErrorMessageVerbosity::SafeOnly), + Some(full_details) => match &self.env_vars { + None => format!("{} / Full details: {}", self.message_safe, full_details), Some(env_vars) => { format!( "{} / Full details: {} / Env vars: {}", - safe, - self.full_details, + self.message_safe, + full_details, env_vars .iter() .map(|(k, v)| format!("{}={}", k, v)) @@ -89,24 +86,11 @@ impl CommandError { /// Creates a new CommandError from safe message. 
To be used when message is safe. pub fn new_from_safe_message(message: String) -> Self { - CommandError::new(message.clone(), Some(message)) + CommandError::new(message, None, None) } - /// Creates a new CommandError having both a safe and an unsafe message. - pub fn new(message_raw: String, message_safe: Option) -> Self { - CommandError { - full_details: message_raw, - message_safe, - env_vars: None, - } - } - - /// Creates a new CommandError having a safe, an unsafe message and env vars. - pub fn new_with_env_vars( - message_raw: String, - message_safe: Option, - env_vars: Option>, - ) -> Self { + /// Creates a new CommandError having both a safe, an unsafe message and env vars. + pub fn new(message_safe: String, message_raw: Option, env_vars: Option>) -> Self { CommandError { full_details: message_raw, message_safe, @@ -120,8 +104,8 @@ impl CommandError { safe_message: Option, ) -> Self { CommandError { - full_details: legacy_command_error.to_string(), - message_safe: safe_message, + full_details: Some(legacy_command_error.to_string()), + message_safe: safe_message.unwrap_or_else(|| "No message".to_string()), env_vars: None, } } @@ -144,7 +128,17 @@ impl CommandError { unsafe_message = format!("{}\nSTDERR {}", unsafe_message, txt); } - CommandError::new_with_env_vars(unsafe_message, Some(message), Some(envs)) + CommandError::new(message, Some(unsafe_message), Some(envs)) + } +} + +impl Default for CommandError { + fn default() -> Self { + Self { + full_details: None, + message_safe: "Unknown command error".to_string(), + env_vars: None, + } } } @@ -370,8 +364,8 @@ pub struct EngineError { qovery_log_message: String, /// user_log_message: message targeted toward Qovery users, might avoid any useless info for users such as Qovery specific identifiers and so on. user_log_message: String, - /// raw_message: raw error message such as command input / output. - message: Option, + /// underlying_error: raw error message such as command input / output. 
+ underlying_error: Option, /// link: link to error documentation (qovery blog, forum, etc.) link: Option, /// hint_message: an hint message aiming to give an hint to the user. For example: "Happens when application port has been changed but application hasn't been restarted.". @@ -401,12 +395,17 @@ impl EngineError { /// Returns proper error message. pub fn message(&self, message_verbosity: ErrorMessageVerbosity) -> String { - match &self.message { + match &self.underlying_error { Some(msg) => msg.message(message_verbosity), None => self.qovery_log_message.to_string(), } } + /// Returns Engine's underlying error. + pub fn underlying_error(&self) -> Option { + self.underlying_error.clone() + } + /// Returns error's link. pub fn link(&self) -> &Option { &self.link @@ -443,7 +442,7 @@ impl EngineError { tag, qovery_log_message, user_log_message, - message, + underlying_error: message, link, hint_message, } @@ -477,7 +476,7 @@ impl EngineError { ), qovery_log_message: message.to_string(), user_log_message: message, - message: None, + underlying_error: None, link: None, hint_message: None, } @@ -2884,9 +2883,9 @@ mod tests { #[test] fn test_command_error_test_hidding_env_vars_in_message_safe_only() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); @@ -2901,9 +2900,9 @@ mod tests { #[test] fn test_command_error_test_hidding_env_vars_in_message_full_without_env_vars() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); @@ -2918,9 +2917,9 @@ mod 
tests { #[test] fn test_engine_error_test_hidding_env_vars_in_message_safe_only() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); let cluster_id = QoveryIdentifier::new_random(); @@ -2936,7 +2935,7 @@ mod tests { ), "qovery_log_message".to_string(), "user_log_message".to_string(), - Some(command_err.clone()), + Some(command_err), None, None, ); @@ -2952,9 +2951,9 @@ mod tests { #[test] fn test_engine_error_test_hidding_env_vars_in_message_full_without_env_vars() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); let cluster_id = QoveryIdentifier::new_random(); @@ -2970,7 +2969,7 @@ mod tests { ), "qovery_log_message".to_string(), "user_log_message".to_string(), - Some(command_err.clone()), + Some(command_err), None, None, ); @@ -2986,9 +2985,9 @@ mod tests { #[test] fn test_command_error_test_hidding_env_vars_in_debug() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); @@ -3003,9 +3002,9 @@ mod tests { #[test] fn test_engine_error_test_hidding_env_vars_in_debug() { // setup: - let command_err = CommandError::new_with_env_vars( - "my raw message".to_string(), - Some("my safe message".to_string()), + let command_err = CommandError::new( + "my safe message".to_string(), + 
Some("my raw message".to_string()), Some(vec![("my_secret".to_string(), "my_secret_value".to_string())]), ); let cluster_id = QoveryIdentifier::new_random(); @@ -3021,7 +3020,7 @@ mod tests { ), "qovery_log_message".to_string(), "user_log_message".to_string(), - Some(command_err.clone()), + Some(command_err), None, None, ); diff --git a/src/events/mod.rs b/src/events/mod.rs index 3cb36db7..5867a948 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -162,7 +162,7 @@ impl EventMessage { impl From for EventMessage { fn from(e: CommandError) -> Self { - EventMessage::new_with_env_vars(e.message_raw(), e.message_safe(), e.env_vars()) + EventMessage::new_with_env_vars(e.message_safe(), e.message_raw(), e.env_vars()) } } diff --git a/src/logger.rs b/src/logger.rs index 4497f4b9..cec6dc6a 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -122,6 +122,7 @@ mod tests { Some(errors::CommandError::new( safe_message.to_string(), Some(raw_message.to_string()), + None, )), Some(link), Some(hint.to_string()), diff --git a/src/template.rs b/src/template.rs index 670da04c..6f366984 100644 --- a/src/template.rs +++ b/src/template.rs @@ -46,7 +46,7 @@ where tera::ErrorKind::Utf8Conversion { .. 
} => "utf-8 conversion issue".to_string(), }; - return Err(CommandError::new(context.into_json().to_string(), Some(error_msg))); + return Err(CommandError::new(error_msg, Some(context.into_json().to_string()), None)); } }; @@ -65,7 +65,11 @@ where P: AsRef, { match crate::fs::copy_files(from.as_ref(), to.as_ref(), true) { - Err(err) => Err(CommandError::new(err.to_string(), None)), + Err(err) => Err(CommandError::new( + "Error copying template files.".to_string(), + Some(err.to_string()), + None, + )), Ok(x) => Ok(x), } } @@ -121,11 +125,17 @@ pub fn write_rendered_templates(rendered_templates: &[RenderedTemplate], into: & let _ = fs::remove_file(dest.as_str()); // create an empty file - let mut f = fs::File::create(&dest).map_err(|e| CommandError::new(e.to_string(), None))?; + let mut f = fs::File::create(&dest).map_err(|e| { + CommandError::new( + "Error while creating template destination file.".to_string(), + Some(e.to_string()), + None, + ) + })?; // write rendered template into the new file f.write_all(rt.content.as_bytes()) - .map_err(|e| CommandError::new(e.to_string(), None))?; + .map_err(|e| CommandError::new("Error while rendering template.".to_string(), Some(e.to_string()), None))?; // perform specific action based on the extension let extension = Path::new(&dest).extension().and_then(OsStr::to_str); diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 176df25c..93963e8f 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -543,10 +543,7 @@ where }; match kubeconfig { - None => Err(CommandError::new( - "No kubeconfig found".to_string(), - Some("No kubeconfig found".to_string()), - )), + None => Err(CommandError::new_from_safe_message("No kubeconfig found".to_string())), Some(file_content) => { let _ = "test"; Ok(file_content) @@ -581,10 +578,10 @@ where )); if let Err(e) = clusters_res { - let message_safe = "Error while trying to get clusters"; return 
OperationResult::Retry(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e.to_string()), - Some(message_safe.to_string()), + "Error while trying to get clusters".to_string(), + Some(e.to_string()), + None, )); } @@ -626,8 +623,9 @@ where Err(e) => { let message_safe = "Error while trying to get clusters"; return OperationResult::Retry(CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e.to_string()), - Some(message_safe.to_string()), + message_safe.to_string(), + Some(e.to_string()), + None, )); } }; @@ -659,13 +657,7 @@ where .write(true) .truncate(true) .open(file_path.as_ref()) - .map_err(|e| { - let message_safe = format!("Error opening kubeconfig file."); - CommandError::new( - format!("{}, error: {}", message_safe.to_string(), e.to_string()), - Some(message_safe.to_string()), - ) - })?; + .map_err(|e| CommandError::new("Error opening kubeconfig file.".to_string(), Some(e.to_string()), None))?; let _ = kubernetes_config_file .write_all(file_content.as_bytes()) .map_err(|_| CommandError::new_from_safe_message("Error while trying to write into file.".to_string()))?; From d2e963da4e10696e69575881b945af22e734d7ac Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 13 Apr 2022 11:53:24 +0200 Subject: [PATCH 033/122] refactor: update docker disk purger percentage --- src/build_platform/local_docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index 454057ba..bab2eff3 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -73,7 +73,7 @@ impl LocalDocker { } // arbitrary percentage that should make the job anytime - const DISK_FREE_SPACE_PERCENTAGE_BEFORE_PURGE: u64 = 20; + const DISK_FREE_SPACE_PERCENTAGE_BEFORE_PURGE: u64 = 40; let mount_points_to_check = vec![Path::new("/var/lib/docker"), Path::new("/")]; let mut disk_free_space_percent: u64 = 100; From 
00346039dc2de137488d5f41b999a27fefd61d3e Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 15 Apr 2022 17:44:44 +0200 Subject: [PATCH 034/122] fix: ignore certmanager resource delete issue To be fixed definitively once certmanager refacto will be over --- src/cloud_provider/kubernetes.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 5b05ee65..22735c74 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -805,22 +805,24 @@ where }, ) { Ok(_) => {} - Err(Operation { error, .. }) => { - return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details, + Err(Operation { error, .. }) => logger.log(EngineEvent::Error( + EngineError::new_cannot_uninstall_helm_chart( + event_details.clone(), "Cert-Manager".to_string(), object.to_string(), error, - )) - } - Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_uninstall_helm_chart( - event_details, + ), + None, + )), + Err(retry::Error::Internal(msg)) => logger.log(EngineEvent::Error( + EngineError::new_cannot_uninstall_helm_chart( + event_details.clone(), "Cert-Manager".to_string(), object.to_string(), CommandError::new_from_safe_message(msg), - )) - } + ), + None, + )), } } From 3a149c19e14543db0795f85304884dc8b6ecbafc Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 15 Apr 2022 19:11:07 +0200 Subject: [PATCH 035/122] fix: disable DO tests while we're not ready for 1.21 version --- tests/digitalocean/do_kubernetes.rs | 128 +++++++++++------------ tests/digitalocean/do_whole_enchilada.rs | 96 ++++++++--------- 2 files changed, 112 insertions(+), 112 deletions(-) diff --git a/tests/digitalocean/do_kubernetes.rs b/tests/digitalocean/do_kubernetes.rs index 653e2ea2..bd3b3484 100644 --- a/tests/digitalocean/do_kubernetes.rs +++ b/tests/digitalocean/do_kubernetes.rs @@ -1,64 +1,64 @@ -extern crate test_utilities; - -use 
self::test_utilities::common::ClusterDomain; -use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; -use ::function_name::named; -use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::digital_ocean::DoRegion; -use test_utilities::common::{cluster_test, ClusterTestType}; - -#[cfg(feature = "test-do-infra")] -fn create_and_destroy_doks_cluster( - region: DoRegion, - test_type: ClusterTestType, - major_boot_version: u8, - minor_boot_version: u8, - test_name: &str, -) { - engine_run_test(|| { - cluster_test( - test_name, - Kind::Do, - context(generate_id().as_str(), generate_cluster_id(region.as_str()).as_str()), - logger(), - region.as_str(), - None, - test_type, - major_boot_version, - minor_boot_version, - &ClusterDomain::Default, - None, - None, - ) - }) -} - -#[cfg(feature = "test-do-infra")] -#[named] -#[test] -fn create_and_destroy_doks_cluster_ams_3() { - let region = DoRegion::Amsterdam3; - create_and_destroy_doks_cluster( - region, - ClusterTestType::Classic, - DO_KUBERNETES_MAJOR_VERSION, - DO_KUBERNETES_MINOR_VERSION, - function_name!(), - ); -} - -#[cfg(feature = "test-do-infra")] -#[named] -#[test] -#[ignore] -fn create_upgrade_and_destroy_doks_cluster_in_nyc_3() { - let region = DoRegion::NewYorkCity3; - create_and_destroy_doks_cluster( - region, - ClusterTestType::WithUpgrade, - DO_KUBERNETES_MAJOR_VERSION, - DO_KUBERNETES_MINOR_VERSION, - function_name!(), - ); -} +// extern crate test_utilities; +// +// use self::test_utilities::common::ClusterDomain; +// use self::test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; +// use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +// use ::function_name::named; +// use qovery_engine::cloud_provider::Kind; +// use 
qovery_engine::models::digital_ocean::DoRegion; +// use test_utilities::common::{cluster_test, ClusterTestType}; +// +// #[cfg(feature = "test-do-infra")] +// fn create_and_destroy_doks_cluster( +// region: DoRegion, +// test_type: ClusterTestType, +// major_boot_version: u8, +// minor_boot_version: u8, +// test_name: &str, +// ) { +// engine_run_test(|| { +// cluster_test( +// test_name, +// Kind::Do, +// context(generate_id().as_str(), generate_cluster_id(region.as_str()).as_str()), +// logger(), +// region.as_str(), +// None, +// test_type, +// major_boot_version, +// minor_boot_version, +// &ClusterDomain::Default, +// None, +// None, +// ) +// }) +// } +// +// #[cfg(feature = "test-do-infra")] +// #[named] +// #[test] +// fn create_and_destroy_doks_cluster_ams_3() { +// let region = DoRegion::Amsterdam3; +// create_and_destroy_doks_cluster( +// region, +// ClusterTestType::Classic, +// DO_KUBERNETES_MAJOR_VERSION, +// DO_KUBERNETES_MINOR_VERSION, +// function_name!(), +// ); +// } +// +// #[cfg(feature = "test-do-infra")] +// #[named] +// #[test] +// #[ignore] +// fn create_upgrade_and_destroy_doks_cluster_in_nyc_3() { +// let region = DoRegion::NewYorkCity3; +// create_and_destroy_doks_cluster( +// region, +// ClusterTestType::WithUpgrade, +// DO_KUBERNETES_MAJOR_VERSION, +// DO_KUBERNETES_MINOR_VERSION, +// function_name!(), +// ); +// } diff --git a/tests/digitalocean/do_whole_enchilada.rs b/tests/digitalocean/do_whole_enchilada.rs index 2851c16f..c57c10ef 100644 --- a/tests/digitalocean/do_whole_enchilada.rs +++ b/tests/digitalocean/do_whole_enchilada.rs @@ -1,49 +1,49 @@ -use ::function_name::named; -use qovery_engine::cloud_provider::Kind; -use qovery_engine::models::digital_ocean::DoRegion; -use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; -use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; -use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, 
logger, FuncTestsSecrets}; +// use ::function_name::named; +// use qovery_engine::cloud_provider::Kind; +// use qovery_engine::models::digital_ocean::DoRegion; +// use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; +// use test_utilities::digitalocean::{DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION}; +// use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; -#[cfg(feature = "test-do-whole-enchilada")] -#[named] -#[test] -fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { - let logger = logger(); - let region = DoRegion::Amsterdam3; - - let organization_id = generate_id(); - let cluster_id = generate_cluster_id(region.as_str()); - let context = context(organization_id.as_str(), cluster_id.as_str()); - - let secrets = FuncTestsSecrets::new(); - let cluster_domain = format!( - "{}.{}", - cluster_id.as_str(), - secrets - .DEFAULT_TEST_DOMAIN - .as_ref() - .expect("DEFAULT_TEST_DOMAIN is not set in secrets") - .as_str() - ); - - let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); - let env_action = environment; - - engine_run_test(|| { - cluster_test( - function_name!(), - Kind::Do, - context.clone(), - logger, - region.as_str(), - None, - ClusterTestType::Classic, - DO_KUBERNETES_MAJOR_VERSION, - DO_KUBERNETES_MINOR_VERSION, - &ClusterDomain::Custom(cluster_domain), - None, - Some(&env_action), - ) - }) -} +// #[cfg(feature = "test-do-whole-enchilada")] +// #[named] +// #[test] +// fn create_upgrade_and_destroy_doks_cluster_with_env_in_ams_3() { +// let logger = logger(); +// let region = DoRegion::Amsterdam3; +// +// let organization_id = generate_id(); +// let cluster_id = generate_cluster_id(region.as_str()); +// let context = context(organization_id.as_str(), cluster_id.as_str()); +// +// let secrets = FuncTestsSecrets::new(); +// let cluster_domain = format!( +// "{}.{}", +// cluster_id.as_str(), 
+// secrets +// .DEFAULT_TEST_DOMAIN +// .as_ref() +// .expect("DEFAULT_TEST_DOMAIN is not set in secrets") +// .as_str() +// ); +// +// let environment = test_utilities::common::working_minimal_environment(&context, cluster_domain.as_str()); +// let env_action = environment; +// +// engine_run_test(|| { +// cluster_test( +// function_name!(), +// Kind::Do, +// context.clone(), +// logger, +// region.as_str(), +// None, +// ClusterTestType::Classic, +// DO_KUBERNETES_MAJOR_VERSION, +// DO_KUBERNETES_MINOR_VERSION, +// &ClusterDomain::Custom(cluster_domain), +// None, +// Some(&env_action), +// ) +// }) +// } From cf7a1585d6e748eea4c186dd7f6c149e8ae427b5 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 15 Apr 2022 21:56:40 +0200 Subject: [PATCH 036/122] fix: name was used instead of id in a db test --- tests/digitalocean/do_databases.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index 48826a55..e806c2c2 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -355,7 +355,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let database_name = format!("postgresql-{}-0", &environment_check.databases[0].name); + let database_name = format!("postgresql-{}-0", &environment_check.databases[0].id); match is_pod_restarted_env( context.clone(), ProviderKind::Do, From c3f69a91cb1cfdacaf6f5d1ea47bd5835b4f7d41 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 14 Apr 2022 21:00:13 +0200 Subject: [PATCH 037/122] feat: speed up env delete Speed up environment delete and fix possible issues occuring because of required redeploy before destroy terraform side --- src/cloud_provider/kubernetes.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/cloud_provider/kubernetes.rs 
b/src/cloud_provider/kubernetes.rs index 22735c74..49174f8b 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -22,7 +22,8 @@ use crate::cloud_provider::{service, CloudProvider, DeploymentTarget}; use crate::cmd::kubectl; use crate::cmd::kubectl::{ kubectl_delete_objects_in_all_namespaces, kubectl_exec_count_all_objects, kubectl_exec_delete_pod, - kubectl_exec_get_node, kubectl_exec_version, kubectl_get_crash_looping_pods, kubernetes_get_all_pdbs, + kubectl_exec_get_node, kubectl_exec_is_namespace_present, kubectl_exec_version, kubectl_get_crash_looping_pods, + kubernetes_get_all_pdbs, }; use crate::cmd::structs::KubernetesNodeCondition; use crate::dns_provider::DnsProvider; @@ -668,6 +669,19 @@ pub fn delete_environment( ) -> Result<(), EngineError> { let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + let kubeconfig = kubernetes.get_kubeconfig_file_path()?; + + // check if environment is not already deleted + // speed up delete env because of terraform requiring apply + destroy + if !kubectl_exec_is_namespace_present( + kubeconfig.clone(), + environment.namespace(), + kubernetes.cloud_provider().credentials_environment_variables(), + ) { + info!("no need to delete environment {}, already absent", environment.namespace()); + return Ok(()); + }; + let stateful_deployment_target = DeploymentTarget { kubernetes, environment, @@ -749,7 +763,7 @@ pub fn delete_environment( // do not catch potential error - to confirm let _ = kubectl::kubectl_exec_delete_namespace( - kubernetes.get_kubeconfig_file_path()?, + kubeconfig, environment.namespace(), kubernetes.cloud_provider().credentials_environment_variables(), ); From 1303e2e668d7008c62236bf4aa2f6551f6e2eafd Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 16 Mar 2022 23:30:39 +0100 Subject: [PATCH 038/122] feat: add new tag to cloud providers --- lib/aws/bootstrap/eks-master-cluster.j2.tf | 3 ++- lib/aws/bootstrap/tf-default-vars.j2.tf | 6 ++++++ 
lib/digitalocean/bootstrap/ks-locals.j2.tf | 3 ++- lib/digitalocean/bootstrap/tf-default-vars.j2.tf | 6 ++++++ lib/scaleway/bootstrap/ks-locals.j2.tf | 3 ++- lib/scaleway/bootstrap/tf-default-vars.j2.tf | 6 ++++++ src/cloud_provider/aws/kubernetes/mod.rs | 1 + src/cloud_provider/scaleway/kubernetes/mod.rs | 1 + 8 files changed, 26 insertions(+), 3 deletions(-) diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf index c8974174..9a5e215a 100644 --- a/lib/aws/bootstrap/eks-master-cluster.j2.tf +++ b/lib/aws/bootstrap/eks-master-cluster.j2.tf @@ -6,8 +6,9 @@ locals { locals { tags_common = { - ClusterId = var.kubernetes_cluster_id + ClusterId = var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name, + OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/aws/bootstrap/tf-default-vars.j2.tf b/lib/aws/bootstrap/tf-default-vars.j2.tf index cf296e4d..b9517338 100644 --- a/lib/aws/bootstrap/tf-default-vars.j2.tf +++ b/lib/aws/bootstrap/tf-default-vars.j2.tf @@ -111,6 +111,12 @@ variable "eks_k8s_versions" { type = map(string) } +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default = "{{ kubernetes_cluster_id }}" diff --git a/lib/digitalocean/bootstrap/ks-locals.j2.tf b/lib/digitalocean/bootstrap/ks-locals.j2.tf index 1f4da11e..03a1fdb5 100644 --- a/lib/digitalocean/bootstrap/ks-locals.j2.tf +++ b/lib/digitalocean/bootstrap/ks-locals.j2.tf @@ -1,7 +1,8 @@ locals { tags_ks = { - ClusterId = var.kubernetes_cluster_id + ClusterId = var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name + OrganizationId = var.organization_id, Region = var.region 
creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf index 35d00f8c..64e1144b 100644 --- a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf +++ b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf @@ -64,6 +64,12 @@ variable "vpc_cidr_set" { type = string } +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster name" default = "{{ doks_cluster_id }}" diff --git a/lib/scaleway/bootstrap/ks-locals.j2.tf b/lib/scaleway/bootstrap/ks-locals.j2.tf index 3f69d9e2..f06db392 100644 --- a/lib/scaleway/bootstrap/ks-locals.j2.tf +++ b/lib/scaleway/bootstrap/ks-locals.j2.tf @@ -1,7 +1,8 @@ locals { tags_ks = { - ClusterId = var.kubernetes_cluster_id + ClusterId = var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name + OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/scaleway/bootstrap/tf-default-vars.j2.tf b/lib/scaleway/bootstrap/tf-default-vars.j2.tf index 73900b51..0502aa85 100644 --- a/lib/scaleway/bootstrap/tf-default-vars.j2.tf +++ b/lib/scaleway/bootstrap/tf-default-vars.j2.tf @@ -69,6 +69,12 @@ variable "scaleway_secret_key" { # Kubernetes +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default = "{{ kubernetes_cluster_id }}" diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs 
index 7e6b3077..5b6936f5 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -498,6 +498,7 @@ impl EKS { context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); + context.insert("kubernetes_full_cluster_id", self.context.cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_worker_nodes", &self.nodes_groups); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index cb7fecd1..bfc48a0e 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -468,6 +468,7 @@ impl Kapsule { // Kubernetes context.insert("test_cluster", &self.context.is_test_cluster()); + context.insert("kubernetes_full_cluster_id", self.context().cluster_id()); context.insert("kubernetes_cluster_id", self.id()); context.insert("kubernetes_cluster_name", self.cluster_name().as_str()); context.insert("kubernetes_cluster_version", self.version()); From 50b58064fc0c4f3d123934d1091f365c783abf19 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 19 Apr 2022 11:57:48 +0200 Subject: [PATCH 039/122] feat: add application commit ID to logs (#689) This CL adds: - application commit ID to some application actions logs. 
- application commit ID annotation / label to application deployment yaml so it can be retrieved later on from other services Ticket: ENG-1170 --- .../charts/q-application/templates/deployment.j2.yaml | 1 + .../charts/q-application/templates/deployment.j2.yaml | 1 + .../charts/q-application/templates/deployment.j2.yaml | 1 + src/cloud_provider/service.rs | 9 ++++++--- src/error.rs | 5 +++-- src/errors/mod.rs | 2 +- src/events/io.rs | 4 +++- src/events/mod.rs | 7 +++++-- src/logger.rs | 5 +++-- src/models/application.rs | 6 +++++- 10 files changed, 29 insertions(+), 12 deletions(-) diff --git a/lib/aws/charts/q-application/templates/deployment.j2.yaml b/lib/aws/charts/q-application/templates/deployment.j2.yaml index 84053b29..b942e99f 100644 --- a/lib/aws/charts/q-application/templates/deployment.j2.yaml +++ b/lib/aws/charts/q-application/templates/deployment.j2.yaml @@ -35,6 +35,7 @@ spec: app: {{ sanitized_name }} annotations: checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + appCommitId: {{ version }} spec: affinity: podAntiAffinity: diff --git a/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml b/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml index 84053b29..b942e99f 100644 --- a/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml @@ -35,6 +35,7 @@ spec: app: {{ sanitized_name }} annotations: checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }}{% endraw %} + appCommitId: {{ version }} spec: affinity: podAntiAffinity: diff --git a/lib/scaleway/charts/q-application/templates/deployment.j2.yaml b/lib/scaleway/charts/q-application/templates/deployment.j2.yaml index 8bd0f742..9d407b87 100644 --- a/lib/scaleway/charts/q-application/templates/deployment.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/deployment.j2.yaml @@ -34,6 +34,7 @@ spec: app: {{ sanitized_name }} annotations: checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + appCommitId: {{ version }} spec: affinity: podAntiAffinity: diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 2593d5d9..05ac86df 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -36,6 +36,9 @@ pub trait Service: ToTransmitter { fn name_with_id(&self) -> String { format!("{} ({})", self.name(), self.id()) } + fn name_with_id_and_version(&self) -> String { + format!("{} ({}) version: {}", self.name(), self.id(), self.version()) + } fn workspace_directory(&self) -> String { let dir_root = match self.service_type() { ServiceType::Application => "applications", @@ -1215,17 +1218,17 @@ where Action::Create => Some(format!( "{} '{}' deployment is in progress...", service.service_type().name(), - service.name_with_id() + service.name_with_id_and_version(), )), Action::Pause => Some(format!( "{} '{}' pause is in progress...", service.service_type().name(), - service.name_with_id() + service.name_with_id_and_version(), )), Action::Delete => Some(format!( "{} '{}' deletion is in progress...", service.service_type().name(), - service.name_with_id() + service.name_with_id_and_version(), )), Action::Nothing => None, }; diff --git a/src/error.rs b/src/error.rs index 0cc266dd..76a831ab 100644 --- a/src/error.rs +++ b/src/error.rs @@ -4,6 +4,7 @@ use std::process::ExitStatus; pub type Type = String; pub type Id = String; pub type Name = String; +pub type 
Version = String; #[derive(Debug)] #[deprecated(note = "errors.EngineError to be used instead")] @@ -44,7 +45,7 @@ pub enum EngineErrorScope { ObjectStorage(Id, Name), Environment(Id, Name), Database(Id, Type, Name), - Application(Id, Name), + Application(Id, Name, Version), Router(Id, Name), } @@ -59,7 +60,7 @@ impl From for EngineErrorScope { Transmitter::ObjectStorage(id, name) => EngineErrorScope::ObjectStorage(id, name), Transmitter::Environment(id, name) => EngineErrorScope::Environment(id, name), Transmitter::Database(id, db_type, name) => EngineErrorScope::Database(id, db_type, name), - Transmitter::Application(id, name) => EngineErrorScope::Application(id, name), + Transmitter::Application(id, name, commit) => EngineErrorScope::Application(id, name, commit), Transmitter::Router(id, name) => EngineErrorScope::Router(id, name), } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 9a0a88b7..dd47f0a8 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -470,7 +470,7 @@ impl EngineError { EngineErrorScope::ObjectStorage(id, name) => Transmitter::ObjectStorage(id, name), EngineErrorScope::Environment(id, name) => Transmitter::Environment(id, name), EngineErrorScope::Database(id, db_type, name) => Transmitter::Database(id, db_type, name), - EngineErrorScope::Application(id, name) => Transmitter::Application(id, name), + EngineErrorScope::Application(id, name, commit) => Transmitter::Application(id, name, commit), EngineErrorScope::Router(id, name) => Transmitter::Router(id, name), }, ), diff --git a/src/events/io.rs b/src/events/io.rs index 9bf9693c..b98bc376 100644 --- a/src/events/io.rs +++ b/src/events/io.rs @@ -163,6 +163,7 @@ impl From for EnvironmentStep { type TransmitterId = String; type TransmitterName = String; type TransmitterType = String; +type TransmitterVersion = String; #[derive(Deserialize, Serialize)] #[serde(rename_all = "lowercase")] @@ -204,6 +205,7 @@ pub enum Transmitter { Application { id: TransmitterId, name: TransmitterName, 
+ commit: TransmitterVersion, }, Router { id: TransmitterId, @@ -222,7 +224,7 @@ impl From for Transmitter { events::Transmitter::ObjectStorage(id, name) => Transmitter::ObjectStorage { id, name }, events::Transmitter::Environment(id, name) => Transmitter::Environment { id, name }, events::Transmitter::Database(id, db_type, name) => Transmitter::Database { id, db_type, name }, - events::Transmitter::Application(id, name) => Transmitter::Application { id, name }, + events::Transmitter::Application(id, name, commit) => Transmitter::Application { id, name, commit }, events::Transmitter::Router(id, name) => Transmitter::Router { id, name }, } } diff --git a/src/events/mod.rs b/src/events/mod.rs index 5867a948..239bfc62 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -326,6 +326,8 @@ type TransmitterId = String; type TransmitterName = String; /// TransmitterType: represents a transmitter type. type TransmitterType = String; // TODO(benjaminch): makes it a real enum / type +/// TransmitterVersion: represents a transmitter version. +type TransmitterVersion = String; #[derive(Debug, Clone, PartialEq)] /// Transmitter: represents the event's source caller (transmitter). @@ -347,7 +349,7 @@ pub enum Transmitter { /// Database: database engine part. Database(TransmitterId, TransmitterType, TransmitterName), /// Application: application engine part. - Application(TransmitterId, TransmitterName), + Application(TransmitterId, TransmitterName, TransmitterVersion), /// Router: router engine part. 
Router(TransmitterId, TransmitterName), } @@ -366,7 +368,8 @@ impl Display for Transmitter { Transmitter::ObjectStorage(id, name) => format!("object_strorage({}, {})", id, name), Transmitter::Environment(id, name) => format!("environment({}, {})", id, name), Transmitter::Database(id, db_type, name) => format!("database({}, {}, {})", id, db_type, name), - Transmitter::Application(id, name) => format!("application({}, {})", id, name), + Transmitter::Application(id, name, version) => + format!("application({}, {}, commit: {})", id, name, version), Transmitter::Router(id, name) => format!("router({}, {})", id, name), } ) diff --git a/src/logger.rs b/src/logger.rs index cec6dc6a..15c9751f 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -97,6 +97,7 @@ mod tests { let execution_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let app_id = QoveryIdentifier::new_from_long_id(Uuid::new_v4().to_string()); let app_name = format!("simple-app-{}", app_id); + let app_version = Uuid::new_v4(); let qovery_message = "Qovery message"; let user_message = "User message"; let safe_message = "Safe message"; @@ -155,7 +156,7 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Environment(EnvironmentStep::Pause), - Transmitter::Application(app_id.to_string(), app_name.to_string()), + Transmitter::Application(app_id.to_string(), app_name.to_string(), app_version.to_string()), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), @@ -170,7 +171,7 @@ mod tests { execution_id.clone(), Some(ScwRegion::Paris.as_str().to_string()), Stage::Environment(EnvironmentStep::Delete), - Transmitter::Application(app_id.to_string(), app_name), + Transmitter::Application(app_id.to_string(), app_name, app_version.to_string()), ), EventMessage::new(raw_message.to_string(), Some(safe_message.to_string())), ), diff --git a/src/models/application.rs b/src/models/application.rs index c8037108..71fd38e2 100644 --- 
a/src/models/application.rs +++ b/src/models/application.rs @@ -246,7 +246,7 @@ impl Application { // Traits implementations impl ToTransmitter for Application { fn to_transmitter(&self) -> Transmitter { - Transmitter::Application(self.id.to_string(), self.name.to_string()) + Transmitter::Application(self.id.to_string(), self.name.to_string(), self.commit_id()) } } @@ -280,6 +280,10 @@ where self.name() } + fn name_with_id_and_version(&self) -> String { + format!("{} ({}) commit: {}", self.name(), self.id(), self.commit_id()) + } + fn sanitized_name(&self) -> String { self.sanitized_name() } From 0bcf53541444999d4689de14b492c8fe149924ac Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 19 Apr 2022 11:58:22 +0200 Subject: [PATCH 040/122] fix: avoid exposing env vars in helm logs (#691) --- src/cmd/helm.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index daf0f0f2..bb0aca91 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -512,7 +512,11 @@ impl Helm { // Try do define/specify a bit more the message let stderr_msg: String = error_message.into_iter().collect(); - let stderr_msg = format!("{}: {}", stderr_msg, err.message(ErrorMessageVerbosity::FullDetails)); + let stderr_msg = format!( + "{}: {}", + stderr_msg, + err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) + ); let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { HelmError::ReleaseLocked(chart.name.clone()) } else if stderr_msg.contains("has been rolled back") { From f93a07b2ddea93a3f35dc556140060e6b4efd644 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 19 Apr 2022 14:20:53 +0200 Subject: [PATCH 041/122] Revert "feat: add new tag to cloud providers" This reverts commit 1303e2e668d7008c62236bf4aa2f6551f6e2eafd. 
--- lib/aws/bootstrap/eks-master-cluster.j2.tf | 3 +-- lib/aws/bootstrap/tf-default-vars.j2.tf | 6 ------ lib/digitalocean/bootstrap/ks-locals.j2.tf | 3 +-- lib/digitalocean/bootstrap/tf-default-vars.j2.tf | 6 ------ lib/scaleway/bootstrap/ks-locals.j2.tf | 3 +-- lib/scaleway/bootstrap/tf-default-vars.j2.tf | 6 ------ src/cloud_provider/aws/kubernetes/mod.rs | 1 - src/cloud_provider/scaleway/kubernetes/mod.rs | 1 - 8 files changed, 3 insertions(+), 26 deletions(-) diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf index 9a5e215a..c8974174 100644 --- a/lib/aws/bootstrap/eks-master-cluster.j2.tf +++ b/lib/aws/bootstrap/eks-master-cluster.j2.tf @@ -6,9 +6,8 @@ locals { locals { tags_common = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = var.kubernetes_cluster_id ClusterName = var.kubernetes_cluster_name, - OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/aws/bootstrap/tf-default-vars.j2.tf b/lib/aws/bootstrap/tf-default-vars.j2.tf index b9517338..cf296e4d 100644 --- a/lib/aws/bootstrap/tf-default-vars.j2.tf +++ b/lib/aws/bootstrap/tf-default-vars.j2.tf @@ -111,12 +111,6 @@ variable "eks_k8s_versions" { type = map(string) } -variable "kubernetes_full_cluster_id" { - description = "Kubernetes full cluster id" - default = "{{ kubernetes_full_cluster_id }}" - type = string -} - variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default = "{{ kubernetes_cluster_id }}" diff --git a/lib/digitalocean/bootstrap/ks-locals.j2.tf b/lib/digitalocean/bootstrap/ks-locals.j2.tf index 03a1fdb5..1f4da11e 100644 --- a/lib/digitalocean/bootstrap/ks-locals.j2.tf +++ b/lib/digitalocean/bootstrap/ks-locals.j2.tf @@ -1,8 +1,7 @@ locals { tags_ks = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = 
var.kubernetes_cluster_id ClusterName = var.kubernetes_cluster_name - OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf index 64e1144b..35d00f8c 100644 --- a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf +++ b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf @@ -64,12 +64,6 @@ variable "vpc_cidr_set" { type = string } -variable "kubernetes_full_cluster_id" { - description = "Kubernetes full cluster id" - default = "{{ kubernetes_full_cluster_id }}" - type = string -} - variable "kubernetes_cluster_id" { description = "Kubernetes cluster name" default = "{{ doks_cluster_id }}" diff --git a/lib/scaleway/bootstrap/ks-locals.j2.tf b/lib/scaleway/bootstrap/ks-locals.j2.tf index f06db392..3f69d9e2 100644 --- a/lib/scaleway/bootstrap/ks-locals.j2.tf +++ b/lib/scaleway/bootstrap/ks-locals.j2.tf @@ -1,8 +1,7 @@ locals { tags_ks = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = var.kubernetes_cluster_id ClusterName = var.kubernetes_cluster_name - OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/scaleway/bootstrap/tf-default-vars.j2.tf b/lib/scaleway/bootstrap/tf-default-vars.j2.tf index 0502aa85..73900b51 100644 --- a/lib/scaleway/bootstrap/tf-default-vars.j2.tf +++ b/lib/scaleway/bootstrap/tf-default-vars.j2.tf @@ -69,12 +69,6 @@ variable "scaleway_secret_key" { # Kubernetes -variable "kubernetes_full_cluster_id" { - description = "Kubernetes full cluster id" - default = "{{ kubernetes_full_cluster_id }}" - type = string -} - variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default = 
"{{ kubernetes_cluster_id }}" diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 5b6936f5..7e6b3077 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -498,7 +498,6 @@ impl EKS { context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); - context.insert("kubernetes_full_cluster_id", self.context.cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_worker_nodes", &self.nodes_groups); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index bfc48a0e..cb7fecd1 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -468,7 +468,6 @@ impl Kapsule { // Kubernetes context.insert("test_cluster", &self.context.is_test_cluster()); - context.insert("kubernetes_full_cluster_id", self.context().cluster_id()); context.insert("kubernetes_cluster_id", self.id()); context.insert("kubernetes_cluster_name", self.cluster_name().as_str()); context.insert("kubernetes_cluster_version", self.version()); From 6a6a836308108386967137997ce2d2ef352ef316 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Tue, 19 Apr 2022 14:26:17 +0200 Subject: [PATCH 042/122] Revert "Revert "feat: add new tag to cloud providers"" This reverts commit f93a07b2ddea93a3f35dc556140060e6b4efd644. 
--- lib/aws/bootstrap/eks-master-cluster.j2.tf | 3 ++- lib/aws/bootstrap/tf-default-vars.j2.tf | 6 ++++++ lib/digitalocean/bootstrap/ks-locals.j2.tf | 3 ++- lib/digitalocean/bootstrap/tf-default-vars.j2.tf | 6 ++++++ lib/scaleway/bootstrap/ks-locals.j2.tf | 3 ++- lib/scaleway/bootstrap/tf-default-vars.j2.tf | 6 ++++++ src/cloud_provider/aws/kubernetes/mod.rs | 1 + src/cloud_provider/scaleway/kubernetes/mod.rs | 1 + 8 files changed, 26 insertions(+), 3 deletions(-) diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf index c8974174..9a5e215a 100644 --- a/lib/aws/bootstrap/eks-master-cluster.j2.tf +++ b/lib/aws/bootstrap/eks-master-cluster.j2.tf @@ -6,8 +6,9 @@ locals { locals { tags_common = { - ClusterId = var.kubernetes_cluster_id + ClusterId = var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name, + OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/aws/bootstrap/tf-default-vars.j2.tf b/lib/aws/bootstrap/tf-default-vars.j2.tf index cf296e4d..b9517338 100644 --- a/lib/aws/bootstrap/tf-default-vars.j2.tf +++ b/lib/aws/bootstrap/tf-default-vars.j2.tf @@ -111,6 +111,12 @@ variable "eks_k8s_versions" { type = map(string) } +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default = "{{ kubernetes_cluster_id }}" diff --git a/lib/digitalocean/bootstrap/ks-locals.j2.tf b/lib/digitalocean/bootstrap/ks-locals.j2.tf index 1f4da11e..03a1fdb5 100644 --- a/lib/digitalocean/bootstrap/ks-locals.j2.tf +++ b/lib/digitalocean/bootstrap/ks-locals.j2.tf @@ -1,7 +1,8 @@ locals { tags_ks = { - ClusterId = var.kubernetes_cluster_id + ClusterId = 
var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name + OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf index 35d00f8c..64e1144b 100644 --- a/lib/digitalocean/bootstrap/tf-default-vars.j2.tf +++ b/lib/digitalocean/bootstrap/tf-default-vars.j2.tf @@ -64,6 +64,12 @@ variable "vpc_cidr_set" { type = string } +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster name" default = "{{ doks_cluster_id }}" diff --git a/lib/scaleway/bootstrap/ks-locals.j2.tf b/lib/scaleway/bootstrap/ks-locals.j2.tf index 3f69d9e2..f06db392 100644 --- a/lib/scaleway/bootstrap/ks-locals.j2.tf +++ b/lib/scaleway/bootstrap/ks-locals.j2.tf @@ -1,7 +1,8 @@ locals { tags_ks = { - ClusterId = var.kubernetes_cluster_id + ClusterId = var.kubernetes_full_cluster_id ClusterName = var.kubernetes_cluster_name + OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %} diff --git a/lib/scaleway/bootstrap/tf-default-vars.j2.tf b/lib/scaleway/bootstrap/tf-default-vars.j2.tf index 73900b51..0502aa85 100644 --- a/lib/scaleway/bootstrap/tf-default-vars.j2.tf +++ b/lib/scaleway/bootstrap/tf-default-vars.j2.tf @@ -69,6 +69,12 @@ variable "scaleway_secret_key" { # Kubernetes +variable "kubernetes_full_cluster_id" { + description = "Kubernetes full cluster id" + default = "{{ kubernetes_full_cluster_id }}" + type = string +} + variable "kubernetes_cluster_id" { description = "Kubernetes cluster id" default 
= "{{ kubernetes_cluster_id }}" diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 7e6b3077..5b6936f5 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -498,6 +498,7 @@ impl EKS { context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); + context.insert("kubernetes_full_cluster_id", self.context.cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_worker_nodes", &self.nodes_groups); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index cb7fecd1..bfc48a0e 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -468,6 +468,7 @@ impl Kapsule { // Kubernetes context.insert("test_cluster", &self.context.is_test_cluster()); + context.insert("kubernetes_full_cluster_id", self.context().cluster_id()); context.insert("kubernetes_cluster_id", self.id()); context.insert("kubernetes_cluster_name", self.cluster_name().as_str()); context.insert("kubernetes_cluster_version", self.version()); From 7b57f7de0db0d674d00fbf9a3f3a969f04af3b67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Tue, 19 Apr 2022 14:34:27 +0200 Subject: [PATCH 043/122] Remove cluster name from tag, as it may contain invalid characters (#692) --- lib/aws/bootstrap/eks-master-cluster.j2.tf | 1 - lib/digitalocean/bootstrap/ks-locals.j2.tf | 1 - lib/scaleway/bootstrap/ks-locals.j2.tf | 1 - 3 files changed, 3 deletions(-) diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf index 9a5e215a..32b119a3 100644 --- a/lib/aws/bootstrap/eks-master-cluster.j2.tf +++ 
b/lib/aws/bootstrap/eks-master-cluster.j2.tf @@ -7,7 +7,6 @@ locals { locals { tags_common = { ClusterId = var.kubernetes_full_cluster_id - ClusterName = var.kubernetes_cluster_name, OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 diff --git a/lib/digitalocean/bootstrap/ks-locals.j2.tf b/lib/digitalocean/bootstrap/ks-locals.j2.tf index 03a1fdb5..dec6438f 100644 --- a/lib/digitalocean/bootstrap/ks-locals.j2.tf +++ b/lib/digitalocean/bootstrap/ks-locals.j2.tf @@ -1,7 +1,6 @@ locals { tags_ks = { ClusterId = var.kubernetes_full_cluster_id - ClusterName = var.kubernetes_cluster_name OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 diff --git a/lib/scaleway/bootstrap/ks-locals.j2.tf b/lib/scaleway/bootstrap/ks-locals.j2.tf index f06db392..71da18e8 100644 --- a/lib/scaleway/bootstrap/ks-locals.j2.tf +++ b/lib/scaleway/bootstrap/ks-locals.j2.tf @@ -1,7 +1,6 @@ locals { tags_ks = { ClusterId = var.kubernetes_full_cluster_id - ClusterName = var.kubernetes_cluster_name OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 From 2959567198a6295a7582f3dc308f9a52508778cf Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 20 Apr 2022 18:14:58 +0200 Subject: [PATCH 044/122] feat: add edge aws struct --- lib/edge/aws/backend.j2.tf | 10 + lib/edge/aws/documentdb.tf | 81 +++++ lib/edge/aws/eks-vpc-common.j2.tf | 42 +++ .../aws/eks-vpc-without-nat-gateways.j2.tf | 75 ++++ lib/edge/aws/elasticcache.tf | 80 +++++ lib/edge/aws/elasticsearch.tf | 79 +++++ lib/edge/aws/qovery-vault.j2.tf | 29 ++ lib/edge/aws/rds.tf | 118 +++++++ lib/edge/aws/s3-qovery-buckets.tf | 44 +++ lib/edge/aws/tf-default-vars.j2.tf | 319 ++++++++++++++++++ lib/edge/aws/tf-providers-aws.j2.tf | 60 ++++ src/cloud_provider/io.rs | 4 +- src/cloud_provider/mod.rs | 7 + src/io_models.rs | 207 +++++++++++- test_utilities/src/common.rs | 6 
+- test_utilities/src/edge_aws_rs.rs | 1 + test_utilities/src/lib.rs | 1 + tests/edge/aws/edge_aws_kubernetes.rs | 65 ++++ tests/edge/aws/mod.rs | 1 + tests/edge/mod.rs | 1 + tests/lib.rs | 1 + 21 files changed, 1227 insertions(+), 4 deletions(-) create mode 100644 lib/edge/aws/backend.j2.tf create mode 100644 lib/edge/aws/documentdb.tf create mode 100644 lib/edge/aws/eks-vpc-common.j2.tf create mode 100644 lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf create mode 100644 lib/edge/aws/elasticcache.tf create mode 100644 lib/edge/aws/elasticsearch.tf create mode 100644 lib/edge/aws/qovery-vault.j2.tf create mode 100644 lib/edge/aws/rds.tf create mode 100644 lib/edge/aws/s3-qovery-buckets.tf create mode 100644 lib/edge/aws/tf-default-vars.j2.tf create mode 100644 lib/edge/aws/tf-providers-aws.j2.tf create mode 100644 test_utilities/src/edge_aws_rs.rs create mode 100644 tests/edge/aws/edge_aws_kubernetes.rs create mode 100644 tests/edge/aws/mod.rs create mode 100644 tests/edge/mod.rs diff --git a/lib/edge/aws/backend.j2.tf b/lib/edge/aws/backend.j2.tf new file mode 100644 index 00000000..a1418800 --- /dev/null +++ b/lib/edge/aws/backend.j2.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + access_key = "{{ aws_access_key_tfstates_account }}" + secret_key = "{{ aws_secret_key_tfstates_account }}" + bucket = "{{ aws_terraform_backend_bucket }}" + key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" + dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}" + region = "{{ aws_region_tfstates_account }}" + } +} diff --git a/lib/edge/aws/documentdb.tf b/lib/edge/aws/documentdb.tf new file mode 100644 index 00000000..ea04fec0 --- /dev/null +++ b/lib/edge/aws/documentdb.tf @@ -0,0 +1,81 @@ +locals { + tags_documentdb = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "DocumentDB" + } + ) +} + +# Network + +resource "aws_subnet" "documentdb_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + availability_zone = 
var.aws_availability_zones[0] + cidr_block = var.documentdb_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.documentdb_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.documentdb_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_route_table_association" "documentdb_cluster_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_docdb_subnet_group" "documentdb" { + description = "DocumentDB linked to ${var.kubernetes_cluster_id}" + name = "documentdb-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) + + tags = local.tags_documentdb +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "documentdb_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow DocumentDB incoming access from anywhere" + from_port = 27017 + protocol = "tcp" + security_group_id = 
aws_security_group.eks_cluster_workers.id + to_port = 27017 + type = "ingress" +} diff --git a/lib/edge/aws/eks-vpc-common.j2.tf b/lib/edge/aws/eks-vpc-common.j2.tf new file mode 100644 index 00000000..63b91880 --- /dev/null +++ b/lib/edge/aws/eks-vpc-common.j2.tf @@ -0,0 +1,42 @@ +data "aws_availability_zones" "available" {} + +locals { + tags_eks_vpc = merge( + local.tags_common, + { + Name = "qovery-eks-workers", + "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", + "kubernetes.io/role/elb" = 1, + {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} + } + ) + + tags_eks_vpc_public = merge( + local.tags_eks_vpc, + { + "Public" = "true" + } + ) + + tags_eks_vpc_private = merge( + local.tags_eks, + { + "Public" = "false" + } + ) +} + +# VPC +resource "aws_vpc" "eks" { + cidr_block = var.vpc_cidr_block + enable_dns_hostnames = true + + tags = local.tags_eks_vpc +} + +# Internet gateway +resource "aws_internet_gateway" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + tags = local.tags_eks_vpc +} \ No newline at end of file diff --git a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf new file mode 100644 index 00000000..d0174308 --- /dev/null +++ b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf @@ -0,0 +1,75 @@ +{% if vpc_qovery_network_mode == "WithoutNatGateways" %} +# Public subnets +resource "aws_subnet" "eks_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.eks_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.eks_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = 
local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.eks_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_route_table" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.eks_cluster.id + } + + // todo(pmavro): add tests for it when it will be available in the SDK + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_eks_vpc +} + +resource "aws_route_table_association" "eks_cluster_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + subnet_id = aws_subnet.eks_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + subnet_id = aws_subnet.eks_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + subnet_id = aws_subnet.eks_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} +{% endif %} \ No newline at end of file diff --git a/lib/edge/aws/elasticcache.tf b/lib/edge/aws/elasticcache.tf new file mode 100644 index 00000000..44073c63 --- /dev/null +++ b/lib/edge/aws/elasticcache.tf @@ -0,0 +1,80 @@ +locals { + tags_elasticache = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "Elasticache" + } + ) +} + +# Network + +resource "aws_subnet" "elasticache_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticache_subnets_zone_a[count.index] + vpc_id = 
aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticache_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticache_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_route_table_association" "elasticache_cluster_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_elasticache_subnet_group" "elasticache" { + description = "Elasticache linked to ${var.kubernetes_cluster_id}" + # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly + name = "elasticache-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "elasticache_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow Redis incoming access from anywhere" + from_port = 6379 + protocol = "tcp" + security_group_id = 
aws_security_group.eks_cluster_workers.id + to_port = 6379 + type = "ingress" +} diff --git a/lib/edge/aws/elasticsearch.tf b/lib/edge/aws/elasticsearch.tf new file mode 100644 index 00000000..f5e873dd --- /dev/null +++ b/lib/edge/aws/elasticsearch.tf @@ -0,0 +1,79 @@ +locals { + tags_elasticsearch = merge( + local.tags_eks, + { + "Service" = "Elasticsearch" + } + ) +} + +# Network + +resource "aws_subnet" "elasticsearch_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticsearch_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticsearch_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticsearch_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_security_group" 
"elasticsearch" { + name = "elasticsearch-${var.kubernetes_cluster_id}" + description = "Elasticsearch security group" + vpc_id = aws_vpc.eks.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + + cidr_blocks = [ + aws_vpc.eks.cidr_block + ] + } + + tags = local.tags_elasticsearch +} diff --git a/lib/edge/aws/qovery-vault.j2.tf b/lib/edge/aws/qovery-vault.j2.tf new file mode 100644 index 00000000..b12afa38 --- /dev/null +++ b/lib/edge/aws/qovery-vault.j2.tf @@ -0,0 +1,29 @@ +locals { + kubeconfig_base64 = base64encode(local.kubeconfig) +} +// do not run for tests clusters to avoid uncleaned info. +// do not try to use count into resource, it will fails trying to connect to vault +{% if vault_auth_method != "none" and not test_cluster %} +resource "vault_generic_secret" "cluster-access" { + path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" + + data_json = < for Kind { @@ -15,6 +16,7 @@ impl From for Kind { KindModel::Aws => Kind::Aws, KindModel::Do => Kind::Do, KindModel::Scw => Kind::Scw, + KindModel::Edge(Edge::Aws) => Kind::Edge(Edge::Aws), } } } diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index 650b1d09..ba20716c 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -52,6 +52,12 @@ pub enum Kind { Aws, Do, Scw, + Edge(Edge), +} + +#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Edge { + Aws, } impl Display for Kind { @@ -60,6 +66,7 @@ impl Display for Kind { Kind::Aws => "AWS", Kind::Do => "Digital Ocean", Kind::Scw => "Scaleway", + Kind::Edge(Edge::Aws) => "Edge AWS", }) } } diff --git a/src/io_models.rs b/src/io_models.rs index 5c57e8cd..ac79b514 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -16,8 +16,8 @@ use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::service::{DatabaseOptions, RouterService}; -use 
crate::cloud_provider::Kind as CPKind; use crate::cloud_provider::{service, CloudProvider}; +use crate::cloud_provider::{Edge, Kind as CPKind, Kind}; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; @@ -295,6 +295,25 @@ impl Application { listeners, logger.clone(), )?)), + Kind::Edge(Edge::Aws) => Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + self.advance_settings.clone(), + AwsAppExtraSettings {}, + listeners, + logger.clone(), + )?)), } } @@ -586,6 +605,22 @@ impl Router { )?); Ok(router) } + Kind::Edge(Edge::Aws) => { + let router = Box::new(models::router::Router::::new( + context.clone(), + self.id.as_str(), + self.name.as_str(), + self.action.to_service_action(), + self.default_domain.as_str(), + custom_domains, + routes, + self.sticky_sessions_enabled, + AwsRouterExtraSettings {}, + listeners, + logger, + )?); + Ok(router) + } } } } @@ -1064,6 +1099,176 @@ impl Database { service::DatabaseType::MongoDB, SCW::full_name().to_string(), )), + + (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { + let db = 
models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + + (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, 
DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } } } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index c000f998..7c2b440f 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -13,6 +13,7 @@ use qovery_engine::io_models::{ use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION}; +use crate::edge_aws_rs::AWS_K3S_VERSION; use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use crate::utilities::{ db_disk_type, db_infos, 
db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init, @@ -29,7 +30,7 @@ use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::{CloudProvider, Kind}; +use qovery_engine::cloud_provider::{CloudProvider, Edge, Kind}; use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; @@ -1135,10 +1136,11 @@ pub fn test_db( Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + Kind::Edge(Edge::Aws) => (AWS_TEST_REGION.to_string(), AWS_K3S_VERSION.to_string()), }; let engine_config = match provider_kind { - Kind::Aws => AWS::docker_cr_engine( + Kind::Aws | Kind::Edge(Edge::Aws) => AWS::docker_cr_engine( &context, logger.clone(), localisation.as_str(), diff --git a/test_utilities/src/edge_aws_rs.rs b/test_utilities/src/edge_aws_rs.rs new file mode 100644 index 00000000..1a50ae40 --- /dev/null +++ b/test_utilities/src/edge_aws_rs.rs @@ -0,0 +1 @@ +pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; diff --git a/test_utilities/src/lib.rs b/test_utilities/src/lib.rs index 4092f39e..14b7316b 100644 --- a/test_utilities/src/lib.rs +++ b/test_utilities/src/lib.rs @@ -7,5 +7,6 @@ pub mod aws; pub mod cloudflare; pub mod common; pub mod digitalocean; +pub mod edge_aws_rs; pub mod scaleway; pub mod utilities; diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/edge/aws/edge_aws_kubernetes.rs new file mode 100644 index 00000000..fbfdacb4 --- /dev/null +++ b/tests/edge/aws/edge_aws_kubernetes.rs @@ -0,0 +1,65 @@ +extern crate test_utilities; + +use 
self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +use ::function_name::named; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; +use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::Kind; +use std::str::FromStr; +use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + +pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; + +#[cfg(feature = "test-aws-infra")] +fn create_and_destroy_edge_aws_cluster( + region: String, + test_type: ClusterTestType, + major_boot_version: u8, + minor_boot_version: u8, + vpc_network_mode: VpcQoveryNetworkMode, + test_name: &str, +) { + engine_run_test(|| { + let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let zones = region.get_zones(); + cluster_test( + test_name, + Kind::Aws, + context( + generate_id().as_str(), + generate_cluster_id(region.to_string().as_str()).as_str(), + ), + logger(), + region.to_aws_format().as_str(), + Some(zones), + test_type, + major_boot_version, + minor_boot_version, + &ClusterDomain::Default, + Option::from(vpc_network_mode), + None, + ) + }) +} + +/* + TESTS NOTES: + It is useful to keep 2 clusters deployment tests to run in // to validate there is no name collision (overlaping) +*/ + +#[cfg(feature = "test-aws-infra")] +#[named] +#[test] +fn create_and_destroy_edge_aws_cluster_eu_west_3() { + let region = "eu-west-3".to_string(); + create_and_destroy_eks_cluster( + region, + ClusterTestType::Classic, + K3S_MAJOR_VERSION, + K3S_MINOR_VERSION, + WithoutNatGateways, + function_name!(), + ); +} diff --git a/tests/edge/aws/mod.rs b/tests/edge/aws/mod.rs new file mode 100644 index 00000000..24609250 --- /dev/null 
+++ b/tests/edge/aws/mod.rs @@ -0,0 +1 @@ +mod edge_aws_kubernetes; diff --git a/tests/edge/mod.rs b/tests/edge/mod.rs new file mode 100644 index 00000000..827da9e3 --- /dev/null +++ b/tests/edge/mod.rs @@ -0,0 +1 @@ +mod aws; diff --git a/tests/lib.rs b/tests/lib.rs index bbc13eb3..18c6bc2d 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -3,4 +3,5 @@ extern crate maplit; mod aws; mod digitalocean; +mod edge; mod scaleway; From 759c521fb268dc48f1bbc60198e3149b1ae95a63 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Wed, 20 Apr 2022 18:44:45 +0200 Subject: [PATCH 045/122] fix: make ToTeraContext public trait --- src/models/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/models/types.rs b/src/models/types.rs index d63bbeb1..3ebbb243 100644 --- a/src/models/types.rs +++ b/src/models/types.rs @@ -27,7 +27,7 @@ pub trait CloudProvider { fn lib_directory_name() -> &'static str; } -pub(crate) trait ToTeraContext { +pub trait ToTeraContext { fn to_tera_context(&self, target: &DeploymentTarget) -> Result; } From eb21185c4128c10cb0cc68441ac60ee3a9d9146a Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Wed, 20 Apr 2022 22:10:28 +0200 Subject: [PATCH 046/122] wip: add ec2 Kubernetes.kind --- .../aws/kubernetes/helm_charts.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 161 +++++++++++++- src/cloud_provider/io.rs | 4 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/mod.rs | 7 - src/io_models.rs | 207 +----------------- test_utilities/src/aws.rs | 8 +- 7 files changed, 172 insertions(+), 227 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 9b8fc0cb..d92e3f79 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; +use crate::cloud_provider::aws::kubernetes::{EksOptions, 
VpcQoveryNetworkMode}; use crate::cloud_provider::helm::{ get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, @@ -53,7 +53,7 @@ pub struct ChartsConfigPrerequisites { pub cloudflare_api_token: String, pub disable_pleco: bool, // qovery options form json input - pub infra_options: Options, + pub infra_options: EksOptions, } pub fn aws_helm_charts( diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 5b6936f5..5765d123 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -73,7 +73,7 @@ impl fmt::Display for VpcQoveryNetworkMode { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Options { +pub struct EksOptions { // AWS related pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, @@ -117,7 +117,7 @@ pub struct Options { pub tls_email_report: String, } -impl ProviderOptions for Options {} +impl ProviderOptions for EksOptions {} pub struct EKS { context: Context, @@ -132,7 +132,7 @@ pub struct EKS { s3: S3, nodes_groups: Vec, template_directory: String, - options: Options, + options: EksOptions, listeners: Listeners, logger: Box, } @@ -148,7 +148,7 @@ impl EKS { zones: Vec, cloud_provider: Arc>, dns_provider: Arc>, - options: Options, + options: EksOptions, nodes_groups: Vec, logger: Box, ) -> Result { @@ -1791,3 +1791,156 @@ impl Listen for EKS { self.listeners.push(listener); } } + +pub struct Ec2Options {} + +impl ProviderOptions for Ec2Options {} + +pub struct EC2 { + context: Context, + id: String, + long_id: uuid::Uuid, + name: String, + version: String, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + template_directory: String, + options: Ec2Options, + listeners: Listeners, + logger: Box, +} + +impl Kubernetes for EC2 { + fn context(&self) -> &Context { 
+ todo!() + } + + fn kind(&self) -> Kind { + todo!() + } + + fn id(&self) -> &str { + todo!() + } + + fn name(&self) -> &str { + todo!() + } + + fn version(&self) -> &str { + todo!() + } + + fn region(&self) -> String { + todo!() + } + + fn zone(&self) -> &str { + todo!() + } + + fn aws_zones(&self) -> Option> { + todo!() + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + todo!() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + todo!() + } + + fn logger(&self) -> &dyn Logger { + todo!() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + todo!() + } + + fn is_valid(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_create(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_create_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + todo!() + } + + fn on_upgrade(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_upgrade_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_downgrade(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_downgrade_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_pause(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_pause_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_delete(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_delete_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn pause_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + 
todo!() + } + + fn delete_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } +} + +impl Listen for EC2 { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/io.rs b/src/cloud_provider/io.rs index ed74ed8a..dc8a5810 100644 --- a/src/cloud_provider/io.rs +++ b/src/cloud_provider/io.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::{Edge, Kind as KindModel}; +use crate::cloud_provider::Kind as KindModel; use serde_derive::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] @@ -7,7 +7,6 @@ pub enum Kind { Aws, Do, Scw, - Edge(Edge), } impl From for Kind { @@ -16,7 +15,6 @@ impl From for Kind { KindModel::Aws => Kind::Aws, KindModel::Do => Kind::Do, KindModel::Scw => Kind::Scw, - KindModel::Edge(Edge::Aws) => Kind::Edge(Edge::Aws), } } } diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 49174f8b..4ff9cf5c 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -364,6 +364,7 @@ pub trait KubernetesNode { #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { Eks, + Ec2, Doks, ScwKapsule, } @@ -372,6 +373,7 @@ impl Display for Kind { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Kind::Eks => "EKS", + Kind::Ec2 => "EC2", Kind::Doks => "DOKS", Kind::ScwKapsule => "ScwKapsule", }) @@ -404,6 +406,10 @@ pub fn deploy_environment( kubernetes, environment, }, + Kind::Ec2 => DeploymentTarget { + kubernetes, + environment, + }, Kind::Doks => DeploymentTarget { kubernetes, environment, @@ -1790,7 +1796,7 @@ mod tests { "systemUUID": "EC2E8B4C-92F9-213B-09B5-C0CD11A7EEB7" } } - } + } ], "kind": "List", "metadata": { diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index ba20716c..650b1d09 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -52,12 +52,6 
@@ pub enum Kind { Aws, Do, Scw, - Edge(Edge), -} - -#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Edge { - Aws, } impl Display for Kind { @@ -66,7 +60,6 @@ impl Display for Kind { Kind::Aws => "AWS", Kind::Do => "Digital Ocean", Kind::Scw => "Scaleway", - Kind::Edge(Edge::Aws) => "Edge AWS", }) } } diff --git a/src/io_models.rs b/src/io_models.rs index ac79b514..5c57e8cd 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -16,8 +16,8 @@ use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::service::{DatabaseOptions, RouterService}; +use crate::cloud_provider::Kind as CPKind; use crate::cloud_provider::{service, CloudProvider}; -use crate::cloud_provider::{Edge, Kind as CPKind, Kind}; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; @@ -295,25 +295,6 @@ impl Application { listeners, logger.clone(), )?)), - Kind::Edge(Edge::Aws) => Ok(Box::new(models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - self.advance_settings.clone(), - AwsAppExtraSettings {}, - listeners, - logger.clone(), - )?)), } } @@ -605,22 +586,6 @@ impl Router { )?); Ok(router) } - Kind::Edge(Edge::Aws) => { - let router = Box::new(models::router::Router::::new( - context.clone(), - self.id.as_str(), - self.name.as_str(), - self.action.to_service_action(), - self.default_domain.as_str(), - custom_domains, - routes, - self.sticky_sessions_enabled, - AwsRouterExtraSettings {}, - listeners, - logger, - )?); - Ok(router) - } } } } @@ -1099,176 +1064,6 @@ impl 
Database { service::DatabaseType::MongoDB, SCW::full_name().to_string(), )), - - (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - - (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - 
database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - 
self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } } } } diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 36fb944c..5921dd71 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -2,7 +2,7 @@ extern crate serde; extern crate serde_derive; use const_format::formatcp; -use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; +use qovery_engine::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::models::NodeGroups; @@ -65,7 +65,7 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> None, ) } -impl Cluster for AWS { +impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, @@ -147,8 +147,8 @@ impl Cluster for AWS { ] } - fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { - Options { + fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> EksOptions { + EksOptions { eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From bb85c037249ee926796d48c7f48be0cff792a3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 21 Apr 2022 09:25:59 +0200 Subject: [PATCH 047/122] Change quay.io to docker.io as bitnami move their registry (#695) --- lib/aws/chart_values/mongodb/q-values.j2.yaml | 2 +- lib/aws/chart_values/mysql/q-values.j2.yaml | 2 +- lib/aws/chart_values/postgresql/q-values.j2.yaml | 2 +- lib/aws/chart_values/redis/q-values.j2.yaml | 2 +- 
lib/digitalocean/chart_values/mongodb/q-values.j2.yaml | 2 +- lib/digitalocean/chart_values/mysql/q-values.j2.yaml | 2 +- lib/digitalocean/chart_values/postgresql/q-values.j2.yaml | 2 +- lib/digitalocean/chart_values/redis/q-values.j2.yaml | 2 +- lib/scaleway/chart_values/mongodb/q-values.j2.yaml | 2 +- lib/scaleway/chart_values/mysql/q-values.j2.yaml | 2 +- lib/scaleway/chart_values/postgresql/q-values.j2.yaml | 2 +- lib/scaleway/chart_values/redis/q-values.j2.yaml | 2 +- src/cmd/structs.rs | 4 ++-- 13 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/aws/chart_values/mongodb/q-values.j2.yaml b/lib/aws/chart_values/mongodb/q-values.j2.yaml index 9f001e92..64a34332 100644 --- a/lib/aws/chart_values/mongodb/q-values.j2.yaml +++ b/lib/aws/chart_values/mongodb/q-values.j2.yaml @@ -13,7 +13,7 @@ image: ## Bitnami MongoDB registry ## - registry: quay.io + registry: docker.io ## Bitnami MongoDB image name ## repository: bitnami/mongodb diff --git a/lib/aws/chart_values/mysql/q-values.j2.yaml b/lib/aws/chart_values/mysql/q-values.j2.yaml index daa4475d..51cb92c3 100644 --- a/lib/aws/chart_values/mysql/q-values.j2.yaml +++ b/lib/aws/chart_values/mysql/q-values.j2.yaml @@ -13,7 +13,7 @@ ## image: debug: false - registry: quay.io + registry: docker.io repository: bitnami/mysql tag: "{{ version }}" diff --git a/lib/aws/chart_values/postgresql/q-values.j2.yaml b/lib/aws/chart_values/postgresql/q-values.j2.yaml index 3e908351..348a611c 100644 --- a/lib/aws/chart_values/postgresql/q-values.j2.yaml +++ b/lib/aws/chart_values/postgresql/q-values.j2.yaml @@ -13,7 +13,7 @@ global: ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/postgresql tag: "{{ version }}" ## Specify a imagePullPolicy diff --git a/lib/aws/chart_values/redis/q-values.j2.yaml b/lib/aws/chart_values/redis/q-values.j2.yaml index 007bb33a..69d7d354 100644 --- a/lib/aws/chart_values/redis/q-values.j2.yaml +++ 
b/lib/aws/chart_values/redis/q-values.j2.yaml @@ -13,7 +13,7 @@ global: ## ref: https://hub.docker.com/r/bitnami/redis/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/redis ## Bitnami Redis image tag ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links diff --git a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml index 6542e3ef..46506ac1 100644 --- a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml @@ -13,7 +13,7 @@ image: ## Bitnami MongoDB registry ## - registry: quay.io + registry: docker.io ## Bitnami MongoDB image name ## repository: bitnami/mongodb diff --git a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml index c51c70a0..5e288642 100644 --- a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml @@ -13,7 +13,7 @@ ## image: debug: false - registry: quay.io + registry: docker.io repository: bitnami/mysql tag: "{{ version }}" diff --git a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml index c61ac44d..a3757916 100644 --- a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml @@ -13,7 +13,7 @@ global: ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/postgresql tag: "{{ version }}" ## Specify a imagePullPolicy diff --git a/lib/digitalocean/chart_values/redis/q-values.j2.yaml b/lib/digitalocean/chart_values/redis/q-values.j2.yaml index 3cb4c035..e1db86e7 100644 --- a/lib/digitalocean/chart_values/redis/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/redis/q-values.j2.yaml @@ -13,7 +13,7 @@ global: ## ref: 
https://hub.docker.com/r/bitnami/redis/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/redis ## Bitnami Redis image tag ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links diff --git a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml index c8853729..12baf9b3 100644 --- a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml @@ -1,7 +1,7 @@ image: ## Bitnami MongoDB registry ## - registry: quay.io + registry: docker.io ## Bitnami MongoDB image name ## repository: bitnami/mongodb diff --git a/lib/scaleway/chart_values/mysql/q-values.j2.yaml b/lib/scaleway/chart_values/mysql/q-values.j2.yaml index e5d05db4..614cb3d2 100644 --- a/lib/scaleway/chart_values/mysql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mysql/q-values.j2.yaml @@ -13,7 +13,7 @@ ## image: debug: false - registry: quay.io + registry: docker.io repository: bitnami/mysql tag: "{{ version }}" diff --git a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml index 81ed0bb3..81a64782 100644 --- a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml @@ -2,7 +2,7 @@ ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/postgresql tag: "{{ version }}" ## Specify a imagePullPolicy diff --git a/lib/scaleway/chart_values/redis/q-values.j2.yaml b/lib/scaleway/chart_values/redis/q-values.j2.yaml index 5c85e762..80619204 100644 --- a/lib/scaleway/chart_values/redis/q-values.j2.yaml +++ b/lib/scaleway/chart_values/redis/q-values.j2.yaml @@ -2,7 +2,7 @@ ## ref: https://hub.docker.com/r/bitnami/redis/tags/ ## image: - registry: quay.io + registry: docker.io repository: bitnami/redis ## Bitnami Redis image tag ## ref: 
https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links diff --git a/src/cmd/structs.rs b/src/cmd/structs.rs index ea39b03f..238024eb 100644 --- a/src/cmd/structs.rs +++ b/src/cmd/structs.rs @@ -896,7 +896,7 @@ mod tests { "value": false } ], - "image": "quay.io/bitnami/postgresql:10.16.0", + "image": "docker.io/bitnami/postgresql:10.16.0", "imagePullPolicy": "IfNotPresent", "livenessProbe": { "exec": { @@ -1129,7 +1129,7 @@ mod tests { "value": false } ], - "image": "quay.io/bitnami/postgresql:10.16.0", + "image": "docker.io/bitnami/postgresql:10.16.0", "imagePullPolicy": "IfNotPresent", "livenessProbe": { "exec": { From d21a867cc6136f89be7ca1ad8502b89e255c6e86 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 11:49:23 +0200 Subject: [PATCH 048/122] wip: add EC2.new(..) with inner properties --- .../aws/kubernetes/helm_charts.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 149 ++++++++++++------ test_utilities/src/aws.rs | 8 +- 3 files changed, 111 insertions(+), 50 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index d92e3f79..9b8fc0cb 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; +use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use crate::cloud_provider::helm::{ get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, @@ -53,7 +53,7 @@ pub struct ChartsConfigPrerequisites { pub cloudflare_api_token: String, pub disable_pleco: bool, // qovery options form json input - pub infra_options: EksOptions, + pub infra_options: Options, } pub fn aws_helm_charts( diff --git 
a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 5765d123..6fcbc18f 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -73,8 +73,9 @@ impl fmt::Display for VpcQoveryNetworkMode { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EksOptions { +pub struct Options { // AWS related + // TODO add ec2_zone_x_subnet_blocks pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, pub eks_zone_c_subnet_blocks: Vec, @@ -117,7 +118,7 @@ pub struct EksOptions { pub tls_email_report: String, } -impl ProviderOptions for EksOptions {} +impl ProviderOptions for Options {} pub struct EKS { context: Context, @@ -132,7 +133,7 @@ pub struct EKS { s3: S3, nodes_groups: Vec, template_directory: String, - options: EksOptions, + options: Options, listeners: Listeners, logger: Box, } @@ -148,36 +149,14 @@ impl EKS { zones: Vec, cloud_provider: Arc>, dns_provider: Arc>, - options: EksOptions, + options: Options, nodes_groups: Vec, logger: Box, ) -> Result { - let event_details = EventDetails::new( - Some(cloud_provider.kind()), - QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), - QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), - QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), - Some(region.to_string()), - Stage::Infrastructure(InfrastructureStep::LoadConfiguration), - Transmitter::Kubernetes(id.to_string(), name.to_string()), - ); - + let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones: Vec = Vec::with_capacity(3); - for zone in zones { - match AwsZones::from_string(zone.to_string()) { - Ok(x) => aws_zones.push(x), - Err(e) => { - return Err(EngineError::new_unsupported_zone( - event_details, - region.to_string(), - zone, - CommandError::new_from_safe_message(e.to_string()), - )) 
- } - }; - } + let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; for node_group in &nodes_groups { if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { @@ -190,17 +169,7 @@ impl EKS { } } - // TODO export this - let s3 = S3::new( - context.clone(), - "s3-temp-id".to_string(), - "default-s3".to_string(), - cloud_provider.access_key_id(), - cloud_provider.secret_access_key(), - region.clone(), - true, - context.resource_expiration_in_seconds(), - ); + let s3 = s3(&context, ®ion, cloud_provider.as_ref()); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -1792,10 +1761,6 @@ impl Listen for EKS { } } -pub struct Ec2Options {} - -impl ProviderOptions for Ec2Options {} - pub struct EC2 { context: Context, id: String, @@ -1808,11 +1773,52 @@ pub struct EC2 { dns_provider: Arc>, s3: S3, template_directory: String, - options: Ec2Options, + options: Options, listeners: Listeners, logger: Box, } +impl EC2 { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + logger: Box, + ) -> Result { + let event_details = event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + + let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EC2 { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + template_directory, + logger, + listeners, + }) + } +} + impl Kubernetes for EC2 { fn context(&self) -> &Context { todo!() @@ -1944,3 +1950,58 @@ impl Listen for EC2 { self.listeners.push(listener); } } + 
+fn event_details>( + cloud_provider: &Box, + kubernetes_id: S, + kubernetes_name: S, + kubernetes_region: &AwsRegion, + context: &Context, +) -> EventDetails { + EventDetails::new( + Some(cloud_provider.kind()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), + Some(kubernetes_region.to_string()), + Stage::Infrastructure(InfrastructureStep::LoadConfiguration), + Transmitter::Kubernetes(kubernetes_id.into(), kubernetes_name.into()), + ) +} + +fn aws_zones( + zones: Vec, + region: &AwsRegion, + event_details: &EventDetails, +) -> Result, EngineError> { + let mut aws_zones = vec![]; + + for zone in zones { + match AwsZones::from_string(zone.to_string()) { + Ok(x) => aws_zones.push(x), + Err(e) => { + return Err(EngineError::new_unsupported_zone( + event_details.clone(), + region.to_string(), + zone, + CommandError::new_from_safe_message(e.to_string()), + )) + } + }; + } + + Ok(aws_zones) +} + +fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box) -> S3 { + S3::new( + context.clone(), + "s3-temp-id".to_string(), + "default-s3".to_string(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), + region.clone(), + true, + context.resource_expiration_in_seconds(), + ) +} diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 5921dd71..36fb944c 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -2,7 +2,7 @@ extern crate serde; extern crate serde_derive; use const_format::formatcp; -use qovery_engine::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; +use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::models::NodeGroups; @@ -65,7 +65,7 @@ 
pub fn aws_default_engine_config(context: &Context, logger: Box) -> None, ) } -impl Cluster for AWS { +impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, @@ -147,8 +147,8 @@ impl Cluster for AWS { ] } - fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> EksOptions { - EksOptions { + fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { + Options { eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From 50ed72b331ba14c50435440094d896efa2608ddb Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:05:38 +0200 Subject: [PATCH 049/122] wip: prepare AWS EC2 Kubernetes provider --- src/cloud_provider/aws/kubernetes/mod.rs | 2397 ++++++++++++---------- test_utilities/src/aws.rs | 9 +- test_utilities/src/common.rs | 73 +- test_utilities/src/digitalocean.rs | 5 + test_utilities/src/scaleway.rs | 32 +- tests/aws/aws_kubernetes.rs | 13 +- tests/aws/aws_whole_enchilada.rs | 2 + tests/edge/aws/edge_aws_kubernetes.rs | 3 + tests/scaleway/scw_kubernetes.rs | 3 + tests/scaleway/scw_whole_enchilada.rs | 3 + 10 files changed, 1437 insertions(+), 1103 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 6fcbc18f..15eff3e6 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -156,7 +156,7 @@ impl EKS { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let aws_zones = aws_zones(zones, ®ion, &event_details)?; for node_group in &nodes_groups { if let Err(e) = 
AwsInstancesType::from_str(node_group.instance_type.as_str()) { @@ -192,51 +192,6 @@ impl EKS { }) } - fn get_engine_location(&self) -> EngineLocation { - self.options.qovery_engine_location.clone() - } - - fn kubeconfig_bucket_name(&self) -> String { - format!("qovery-kubeconfigs-{}", self.id()) - } - - fn managed_dns_resolvers_terraform_format(&self) -> String { - let managed_dns_resolvers: Vec = self - .dns_provider - .resolvers() - .iter() - .map(|x| format!("{}", x.clone())) - .collect(); - - terraform_list_format(managed_dns_resolvers) - } - - fn lets_encrypt_url(&self) -> String { - match &self.context.is_test_cluster() { - true => "https://acme-staging-v02.api.letsencrypt.org/directory", - false => "https://acme-v02.api.letsencrypt.org/directory", - } - .to_string() - } - - /// divide by 2 the total number of subnet to get the exact same number as private and public - fn check_odd_subnets( - &self, - event_details: EventDetails, - zone_name: &str, - subnet_block: &[String], - ) -> Result { - if subnet_block.len() % 2 == 1 { - return Err(EngineError::new_subnets_count_is_not_even( - event_details, - zone_name.to_string(), - subnet_block.len(), - )); - } - - Ok((subnet_block.len() / 2) as usize) - } - fn set_cluster_autoscaler_replicas( &self, event_details: EventDetails, @@ -270,983 +225,6 @@ impl EKS { Ok(()) } - fn tera_context(&self) -> Result { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); - let mut context = TeraContext::new(); - - let format_ips = - |ips: &Vec| -> Vec { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::>() }; - let format_zones = |zones: &Vec| -> Vec { - zones - .iter() - .map(|zone| zone.to_terraform_format_string()) - .collect::>() - }; - - let aws_zones = format_zones(&self.zones); - - let mut eks_zone_a_subnet_blocks_private = format_ips(&self.options.eks_zone_a_subnet_blocks); - let mut eks_zone_b_subnet_blocks_private = 
format_ips(&self.options.eks_zone_b_subnet_blocks); - let mut eks_zone_c_subnet_blocks_private = format_ips(&self.options.eks_zone_c_subnet_blocks); - - match self.options.vpc_qovery_network_mode { - VpcQoveryNetworkMode::WithNatGateways => { - let max_subnet_zone_a = - self.check_odd_subnets(event_details.clone(), "a", &eks_zone_a_subnet_blocks_private)?; - let max_subnet_zone_b = - self.check_odd_subnets(event_details.clone(), "b", &eks_zone_b_subnet_blocks_private)?; - let max_subnet_zone_c = - self.check_odd_subnets(event_details.clone(), "c", &eks_zone_c_subnet_blocks_private)?; - - let eks_zone_a_subnet_blocks_public: Vec = - eks_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); - let eks_zone_b_subnet_blocks_public: Vec = - eks_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); - let eks_zone_c_subnet_blocks_public: Vec = - eks_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); - - context.insert("eks_zone_a_subnet_blocks_public", &eks_zone_a_subnet_blocks_public); - context.insert("eks_zone_b_subnet_blocks_public", &eks_zone_b_subnet_blocks_public); - context.insert("eks_zone_c_subnet_blocks_public", &eks_zone_c_subnet_blocks_public); - } - VpcQoveryNetworkMode::WithoutNatGateways => {} - }; - context.insert("vpc_qovery_network_mode", &self.options.vpc_qovery_network_mode.to_string()); - - let rds_zone_a_subnet_blocks = format_ips(&self.options.rds_zone_a_subnet_blocks); - let rds_zone_b_subnet_blocks = format_ips(&self.options.rds_zone_b_subnet_blocks); - let rds_zone_c_subnet_blocks = format_ips(&self.options.rds_zone_c_subnet_blocks); - - let documentdb_zone_a_subnet_blocks = format_ips(&self.options.documentdb_zone_a_subnet_blocks); - let documentdb_zone_b_subnet_blocks = format_ips(&self.options.documentdb_zone_b_subnet_blocks); - let documentdb_zone_c_subnet_blocks = format_ips(&self.options.documentdb_zone_c_subnet_blocks); - - let elasticache_zone_a_subnet_blocks = 
format_ips(&self.options.elasticache_zone_a_subnet_blocks); - let elasticache_zone_b_subnet_blocks = format_ips(&self.options.elasticache_zone_b_subnet_blocks); - let elasticache_zone_c_subnet_blocks = format_ips(&self.options.elasticache_zone_c_subnet_blocks); - - let elasticsearch_zone_a_subnet_blocks = format_ips(&self.options.elasticsearch_zone_a_subnet_blocks); - let elasticsearch_zone_b_subnet_blocks = format_ips(&self.options.elasticsearch_zone_b_subnet_blocks); - let elasticsearch_zone_c_subnet_blocks = format_ips(&self.options.elasticsearch_zone_c_subnet_blocks); - - let region_cluster_id = format!("{}-{}", self.region(), self.id()); - let vpc_cidr_block = self.options.vpc_cidr_block.clone(); - let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", self.id()); - let eks_cidr_subnet = self.options.eks_cidr_subnet.clone(); - - let eks_access_cidr_blocks = format_ips(&self.options.eks_access_cidr_blocks); - - let qovery_api_url = self.options.qovery_api_url.clone(); - let rds_cidr_subnet = self.options.rds_cidr_subnet.clone(); - let documentdb_cidr_subnet = self.options.documentdb_cidr_subnet.clone(); - let elasticache_cidr_subnet = self.options.elasticache_cidr_subnet.clone(); - let elasticsearch_cidr_subnet = self.options.elasticsearch_cidr_subnet.clone(); - - // Qovery - context.insert("organization_id", self.cloud_provider.organization_id()); - context.insert("qovery_api_url", &qovery_api_url); - - context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); - context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); - - context.insert("test_cluster", &self.context.is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - context.insert("force_upgrade", &self.context.requires_forced_upgrade()); - - // Qovery features - 
context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); - context.insert( - "metrics_history_enabled", - &self.context.is_feature_enabled(&Features::MetricsHistory), - ); - - // DNS configuration - let managed_dns_list = vec![self.dns_provider.name()]; - let managed_dns_domains_helm_format = vec![self.dns_provider.domain().to_string()]; - let managed_dns_domains_root_helm_format = vec![self.dns_provider.domain().root_domain().to_string()]; - let managed_dns_domains_terraform_format = terraform_list_format(vec![self.dns_provider.domain().to_string()]); - let managed_dns_domains_root_terraform_format = - terraform_list_format(vec![self.dns_provider.domain().root_domain().to_string()]); - let managed_dns_resolvers_terraform_format = self.managed_dns_resolvers_terraform_format(); - - context.insert("managed_dns", &managed_dns_list); - context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); - context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); - context.insert( - "managed_dns_domains_root_terraform_format", - &managed_dns_domains_root_terraform_format, - ); - context.insert( - "managed_dns_resolvers_terraform_format", - &managed_dns_resolvers_terraform_format, - ); - - match self.dns_provider.kind() { - dns_provider::Kind::Cloudflare => { - context.insert("external_dns_provider", self.dns_provider.provider_name()); - context.insert("cloudflare_api_token", self.dns_provider.token()); - context.insert("cloudflare_email", self.dns_provider.account()); - } - }; - - context.insert("dns_email_report", &self.options.tls_email_report); - - // TLS - context.insert("acme_server_url", &self.lets_encrypt_url()); - - // Vault - context.insert("vault_auth_method", "none"); - - if env::var_os("VAULT_ADDR").is_some() { - // select the correct used method - match 
env::var_os("VAULT_ROLE_ID") { - Some(role_id) => { - context.insert("vault_auth_method", "app_role"); - context.insert("vault_role_id", role_id.to_str().unwrap()); - - match env::var_os("VAULT_SECRET_ID") { - Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log(EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details, - "VAULT_SECRET_ID".to_string(), - ), - None, - )), - } - } - None => { - if env::var_os("VAULT_TOKEN").is_some() { - context.insert("vault_auth_method", "token") - } - } - } - }; - - // Other Kubernetes - context.insert("kubernetes_cluster_name", &self.cluster_name()); - context.insert("enable_cluster_autoscaler", &true); - - // AWS - context.insert("aws_access_key", &self.cloud_provider.access_key_id()); - context.insert("aws_secret_key", &self.cloud_provider.secret_access_key()); - - // AWS S3 tfstate storage - context.insert( - "aws_access_key_tfstates_account", - self.cloud_provider() - .terraform_state_credentials() - .access_key_id - .as_str(), - ); - - context.insert( - "aws_secret_key_tfstates_account", - self.cloud_provider() - .terraform_state_credentials() - .secret_access_key - .as_str(), - ); - context.insert( - "aws_region_tfstates_account", - self.cloud_provider().terraform_state_credentials().region.as_str(), - ); - - context.insert("aws_region", &self.region()); - context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); - context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); - context.insert("vpc_cidr_block", &vpc_cidr_block); - context.insert("vpc_custom_routing_table", &self.options.vpc_custom_routing_table); - context.insert("s3_kubeconfig_bucket", &self.kubeconfig_bucket_name()); - - // AWS - EKS - context.insert("aws_availability_zones", &aws_zones); - context.insert("eks_cidr_subnet", &eks_cidr_subnet); - context.insert("kubernetes_cluster_name", &self.name()); - 
context.insert("kubernetes_cluster_id", self.id()); - context.insert("kubernetes_full_cluster_id", self.context.cluster_id()); - context.insert("eks_region_cluster_id", region_cluster_id.as_str()); - context.insert("eks_worker_nodes", &self.nodes_groups); - context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); - context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private); - context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); - context.insert("eks_masters_version", &self.version()); - context.insert("eks_workers_version", &self.version()); - context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); - context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); - - // AWS - RDS - context.insert("rds_cidr_subnet", &rds_cidr_subnet); - context.insert("rds_zone_a_subnet_blocks", &rds_zone_a_subnet_blocks); - context.insert("rds_zone_b_subnet_blocks", &rds_zone_b_subnet_blocks); - context.insert("rds_zone_c_subnet_blocks", &rds_zone_c_subnet_blocks); - - // AWS - DocumentDB - context.insert("documentdb_cidr_subnet", &documentdb_cidr_subnet); - context.insert("documentdb_zone_a_subnet_blocks", &documentdb_zone_a_subnet_blocks); - context.insert("documentdb_zone_b_subnet_blocks", &documentdb_zone_b_subnet_blocks); - context.insert("documentdb_zone_c_subnet_blocks", &documentdb_zone_c_subnet_blocks); - - // AWS - Elasticache - context.insert("elasticache_cidr_subnet", &elasticache_cidr_subnet); - context.insert("elasticache_zone_a_subnet_blocks", &elasticache_zone_a_subnet_blocks); - context.insert("elasticache_zone_b_subnet_blocks", &elasticache_zone_b_subnet_blocks); - context.insert("elasticache_zone_c_subnet_blocks", &elasticache_zone_c_subnet_blocks); - - // AWS - Elasticsearch - context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); - context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); - 
context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); - context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); - - // grafana credentials - context.insert("grafana_admin_user", self.options.grafana_admin_user.as_str()); - context.insert("grafana_admin_password", self.options.grafana_admin_password.as_str()); - - // qovery - context.insert("qovery_api_url", self.options.qovery_api_url.as_str()); - context.insert("qovery_nats_url", self.options.qovery_nats_url.as_str()); - context.insert("qovery_nats_user", self.options.qovery_nats_user.as_str()); - context.insert("qovery_nats_password", self.options.qovery_nats_password.as_str()); - context.insert("qovery_ssh_key", self.options.qovery_ssh_key.as_str()); - context.insert("discord_api_key", self.options.discord_api_key.as_str()); - - Ok(context) - } - - fn create(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), - )); - self.send_to_customer( - format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - // upgrade cluster instead if required - match self.get_kubeconfig_file() { - Ok((path, _)) => match is_kubernetes_upgrade_required( - path, - &self.version, - self.cloud_provider.credentials_environment_variables(), - event_details.clone(), - self.logger(), - ) { - Ok(x) => { - if x.required_upgrade_on.is_some() { - return self.upgrade_with_status(x); - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - )) - } - Err(e) => { - self.logger().log(EngineEvent::Error(e, 
Some(EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - )))); - } - }, - Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) - - }; - - // create AWS IAM roles - let already_created_roles = get_default_roles_to_create(); - for role in already_created_roles { - match role.create_service_linked_role( - self.cloud_provider.access_key_id().as_str(), - self.cloud_provider.secret_access_key().as_str(), - ) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Role {} is already present, no need to create", - role.role_name - )), - )), - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), - None, - )), - } - } - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let context = self.tera_context()?; - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
- // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), - )); - self.send_to_customer( - format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - - // terraform deployment dedicated to cloud resources - if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - - // kubernetes helm deployments on the cluster - // todo: instead of downloading kubeconfig file, use 
the one that has just been generated by terraform - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - - let credentials_environment_variables: Vec<(String, String)> = self - .cloud_provider - .credentials_environment_variables() - .into_iter() - .map(|x| (x.0.to_string(), x.1.to_string())) - .collect(); - - let charts_prerequisites = ChartsConfigPrerequisites { - organization_id: self.cloud_provider.organization_id().to_string(), - organization_long_id: self.cloud_provider.organization_long_id(), - infra_options: self.options.clone(), - cluster_id: self.id.clone(), - cluster_long_id: self.long_id, - region: self.region(), - cluster_name: self.cluster_name(), - cloud_provider: "aws".to_string(), - test_cluster: self.context.is_test_cluster(), - aws_access_key_id: self.cloud_provider.access_key_id(), - aws_secret_access_key: self.cloud_provider.secret_access_key(), - vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(), - qovery_engine_location: self.get_engine_location(), - ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory), - ff_metrics_history_enabled: self.context.is_feature_enabled(&Features::MetricsHistory), - managed_dns_name: self.dns_provider.domain().to_string(), - managed_dns_helm_format: self.dns_provider.domain().to_helm_format_string(), - managed_dns_resolvers_terraform_format: self.managed_dns_resolvers_terraform_format(), - external_dns_provider: self.dns_provider.provider_name().to_string(), - dns_email_report: self.options.tls_email_report.clone(), - acme_url: self.lets_encrypt_url(), - cloudflare_email: self.dns_provider.account().to_string(), - cloudflare_api_token: self.dns_provider.token().to_string(), - disable_pleco: self.context.disable_pleco(), - }; - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - )); - let helm_charts_to_deploy 
= aws_helm_charts( - format!("{}/qovery-tf-config.json", &temp_dir).as_str(), - &charts_prerequisites, - Some(&temp_dir), - kubeconfig_path, - &credentials_environment_variables, - ) - .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; - - deploy_charts_levels( - kubeconfig_path, - &credentials_environment_variables, - helm_charts_to_deploy, - self.context.is_dry_run_deploy(), - ) - .map_err(|e| EngineError::new_helm_charts_deploy_error(event_details.clone(), e)) - } - - fn create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - let (kubeconfig_path, _) = self.get_kubeconfig_file()?; - let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("EKS.create_error() called.".to_string()), - )); - - match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self - .logger() - .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), - Err(err) => self.logger().log(EngineEvent::Warning( - event_details, - EventMessage::new( - "Error trying to get kubernetes events".to_string(), - Some(err.message(ErrorMessageVerbosity::FullDetails)), - ), - )), - }; - - Ok(()) - } - - fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), - )); - - Ok(()) - } - - fn downgrade(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - 
EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), - )); - - Ok(()) - } - - fn pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.send_to_customer( - format!("Preparing EKS {} cluster pause with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - self.logger().log(EngineEvent::Info( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let mut context = self.tera_context()?; - - // pause: remove all worker nodes to reduce the bill but keep master to keep all the deployment config, certificates etc... - let worker_nodes: Vec = Vec::new(); - context.insert("eks_worker_nodes", &worker_nodes); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
- // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - // pause: only select terraform workers elements to pause to avoid applying on the whole config - // this to avoid failures because of helm deployments on removing workers nodes - let tf_workers_resources = match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let mut tf_workers_resources_name = Vec::new(); - for name in x { - if name.starts_with("aws_eks_node_group.") { - tf_workers_resources_name.push(name); - } - } - tf_workers_resources_name - } - Err(e) => { - let error = EngineError::new_terraform_state_does_not_exist(event_details, e); - self.logger().log(EngineEvent::Error(error.clone(), None)); - return Err(error); - } - }; - - if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); - } - - let kubernetes_config_file_path = self.get_kubeconfig_file_path()?; - - // pause: wait 1h for the engine to have 0 running jobs before pausing and avoid getting unreleased lock (from helm or terraform for example) - if self.get_engine_location() == EngineLocation::ClientSide { - match self.context.is_feature_enabled(&Features::MetricsHistory) { - true => { - let metric_name = "taskmanager_nb_running_tasks"; - let wait_engine_job_finish = retry::retry(Fixed::from_millis(60000).take(60), || { - return match kubectl_exec_api_custom_metrics( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - "qovery", - None, - 
metric_name, - ) { - Ok(metrics) => { - let mut current_engine_jobs = 0; - - for metric in metrics.items { - match metric.value.parse::() { - Ok(job_count) if job_count > 0 => current_engine_jobs += 1, - Err(e) => { - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics( - event_details.clone(), - CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); - } - _ => {} - } - } - - if current_engine_jobs == 0 { - OperationResult::Ok(()) - } else { - OperationResult::Retry(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), None)) - } - } - Err(e) => { - OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), e)) - } - }; - }); - - match wait_engine_job_finish { - Ok(_) => { - self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); - } - Err(Operation { error, .. }) => { - return Err(error) - } - Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) - } - } - } - false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), - } - } - - let mut terraform_args_string = vec!["apply".to_string(), "-auto-approve".to_string()]; - for x in tf_workers_resources { - terraform_args_string.push(format!("-target={}", x)); - } - let terraform_args = terraform_args_string.iter().map(|x| &**x).collect(); - - self.send_to_customer( - format!("Pausing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), - )); - - match terraform_exec(temp_dir.as_str(), terraform_args) { - Ok(_) => { - let message = format!("Kubernetes cluster {} successfully paused", self.name()); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); - Ok(()) - } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), - } - } - - fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), - )); - - Ok(()) - } - - fn delete(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; - - self.send_to_customer( - format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files 
and copy them into temp dir - let context = self.tera_context()?; - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message(ErrorMessageVerbosity::FullDetails))), - )); - - skip_kubernetes_step = true; - "".to_string() - } - }; - - // should apply before destroy to be sure destroy will compute on all resources - // don't exit on failure, it can happen if we resume a destroy process - let message = format!( - "Ensuring everything is up to date before deleting cluster {}/{}", - self.name(), - self.id() - ); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Running 
Terraform apply before running a delete.".to_string()), - )); - if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { - // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log(EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - )); - }; - - if !skip_kubernetes_step { - // should make the diff between all namespaces and qovery managed namespaces - let message = format!( - "Deleting all non-Qovery deployed applications and dependencies for cluster {}/{}", - self.name(), - self.id() - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(message.to_string()), - )); - self.send_to_customer(&message, &listeners_helper); - - let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - ); - - match all_namespaces { - Ok(namespace_vec) => { - let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); - let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - )); - - for namespace_to_delete in namespaces_to_delete.iter() { - match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, - namespace_to_delete, - self.cloud_provider().credentials_environment_variables(), - ) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - )), - Err(e) => { - if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - 
"Can't delete the namespace `{}`", - namespace_to_delete - )), - )); - } - } - } - } - } - Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), - )); - } - } - - let message = format!( - "Deleting all Qovery deployed elements and associated dependencies for cluster {}/{}", - self.name(), - self.id() - ); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubernetes_config_file_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; - let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &[]) - .map_err(|e| to_engine_error(&event_details, e))?; - - // required to avoid namespace stuck on deletion - uninstall_cert_manager( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - event_details.clone(), - self.logger(), - )?; - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - )); - - let qovery_namespaces = get_qovery_managed_namespaces(); - for qovery_namespace in qovery_namespaces.iter() { - let charts_to_delete = helm - .list_release(Some(qovery_namespace), &[]) - .map_err(|e| to_engine_error(&event_details, e))?; - - for chart in charts_to_delete { - let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - 
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - )), - Err(e) => { - let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } - } - } - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - )); - - for qovery_namespace in qovery_namespaces.iter() { - let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, - qovery_namespace, - self.cloud_provider().credentials_environment_variables(), - ); - match deletion { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - )), - Err(e) => { - if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), - )) - } - } - } - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - )); - - match helm.list_release(None, &[]) { - Ok(helm_charts) => { - for chart in helm_charts { - let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - )), - Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } - } - } - } - Err(e) => { - let message_safe = "Unable to get helm list"; - 
self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - )) - } - } - }; - - let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - )); - - match retry::retry(Fibonacci::from_millis(60000).take(3), || { - match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { - Ok(_) => OperationResult::Ok(()), - Err(e) => OperationResult::Retry(e), - } - }) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details, - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - )); - Ok(()) - } - Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - error, - )), - Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - CommandError::new("Error while trying to perform Terraform destroy".to_string(), Some(msg), None), - )), - } - } - - fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), - )); - - Ok(()) - } - fn cloud_provider_name(&self) -> &str { "aws" } @@ -1320,7 +298,16 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.create()) + send_progress_on_long_task(self, Action::Create, || { + create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1334,7 +321,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.create_error()) + send_progress_on_long_task(self, Action::Create, || create_error(self)) } fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { @@ -1358,7 +345,7 @@ impl Kubernetes for EKS { let temp_dir = self.get_temp_dir(event_details.clone())?; // generate terraform files and copy them into temp dir - let mut context = self.tera_context()?; + let mut context = tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; // // Upgrade master nodes @@ -1579,7 +566,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) + send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) } #[named] @@ -1593,7 +580,7 @@ impl Kubernetes for EKS { event_details, self.logger(), 
); - send_progress_on_long_task(self, Action::Create, || self.downgrade()) + send_progress_on_long_task(self, Action::Create, || downgrade()) } #[named] @@ -1607,7 +594,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) + send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) } #[named] @@ -1621,7 +608,15 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Pause, || self.pause()) + send_progress_on_long_task(self, Action::Pause, || { + pause( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1635,7 +630,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Pause, || self.pause_error()) + send_progress_on_long_task(self, Action::Pause, || pause_error(self)) } #[named] @@ -1649,7 +644,15 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Delete, || self.delete()) + send_progress_on_long_task(self, Action::Delete, || { + delete( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1663,7 +666,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Delete, || self.delete_error()) + send_progress_on_long_task(self, Action::Delete, || delete_error(self)) } #[named] @@ -1795,7 +798,7 @@ impl EC2 { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let aws_zones = aws_zones(zones, ®ion, &event_details)?; let s3 = s3(&context, ®ion, cloud_provider.as_ref()); // copy listeners from CloudProvider @@ -1817,127 +820,309 @@ impl EC2 { listeners, }) } + + fn 
cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } } impl Kubernetes for EC2 { fn context(&self) -> &Context { - todo!() + &self.context } fn kind(&self) -> Kind { - todo!() + Kind::Ec2 } fn id(&self) -> &str { - todo!() + self.id.as_str() } fn name(&self) -> &str { - todo!() + self.name.as_str() } fn version(&self) -> &str { - todo!() + self.version.as_str() } fn region(&self) -> String { - todo!() + self.region.to_aws_format() } fn zone(&self) -> &str { - todo!() + "" } fn aws_zones(&self) -> Option> { - todo!() + Some(self.zones.clone()) } fn cloud_provider(&self) -> &dyn CloudProvider { - todo!() + (*self.cloud_provider).borrow() } fn dns_provider(&self) -> &dyn DnsProvider { - todo!() + (*self.dns_provider).borrow() } fn logger(&self) -> &dyn Logger { - todo!() + self.logger.borrow() } fn config_file_store(&self) -> &dyn ObjectStorage { - todo!() + &self.s3 } fn is_valid(&self) -> Result<(), EngineError> { - todo!() + Ok(()) } + #[named] fn on_create(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &vec![], + &self.options, + ) + }) } + #[named] fn on_create_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || create_error(self)) } - fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - 
todo!() + fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + // TODO + Ok(()) } + #[named] fn on_upgrade(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) } + #[named] fn on_upgrade_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) } + #[named] fn on_downgrade(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || downgrade()) } + #[named] fn on_downgrade_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) } + #[named] fn on_pause(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + 
self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || { + pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) } + #[named] fn on_pause_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || pause_error(self)) } + #[named] fn on_delete(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) } + #[named] fn on_delete_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || delete_error(self)) } + #[named] fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::deploy_environment(self, environment, event_details, self.logger()) } + #[named] fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) } + #[named] fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::pause_environment(self, environment, event_details, self.logger()) } - fn pause_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } + #[named] fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::delete_environment(self, environment, event_details, self.logger()) } - fn delete_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + 
function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } } @@ -2005,3 +1190,1081 @@ fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box Result { + if subnet_block.len() % 2 == 1 { + return Err(EngineError::new_subnets_count_is_not_even( + event_details, + zone_name.to_string(), + subnet_block.len(), + )); + } + + Ok((subnet_block.len() / 2) as usize) +} + +fn lets_encrypt_url(context: &Context) -> String { + match context.is_test_cluster() { + true => "https://acme-staging-v02.api.letsencrypt.org/directory", + false => "https://acme-v02.api.letsencrypt.org/directory", + } + .to_string() +} + +fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> String { + let managed_dns_resolvers = dns_provider + .resolvers() + .iter() + .map(|x| format!("{}", x.clone())) + .collect::>(); + + terraform_list_format(managed_dns_resolvers) +} + +fn tera_context( + kubernetes: &dyn Kubernetes, + zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); + let mut context = TeraContext::new(); + + let format_ips = + |ips: &Vec| -> Vec { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::>() }; + + let aws_zones = zones + .iter() + .map(|zone| zone.to_terraform_format_string()) + .collect::>(); + + let mut eks_zone_a_subnet_blocks_private = format_ips(&options.eks_zone_a_subnet_blocks); + let mut eks_zone_b_subnet_blocks_private = format_ips(&options.eks_zone_b_subnet_blocks); + let mut eks_zone_c_subnet_blocks_private = format_ips(&options.eks_zone_c_subnet_blocks); + + match options.vpc_qovery_network_mode { + VpcQoveryNetworkMode::WithNatGateways => { + let max_subnet_zone_a = check_odd_subnets(event_details.clone(), "a", &eks_zone_a_subnet_blocks_private)?; + let max_subnet_zone_b = check_odd_subnets(event_details.clone(), "b", &eks_zone_b_subnet_blocks_private)?; + let max_subnet_zone_c = 
check_odd_subnets(event_details.clone(), "c", &eks_zone_c_subnet_blocks_private)?; + + let eks_zone_a_subnet_blocks_public: Vec = + eks_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); + let eks_zone_b_subnet_blocks_public: Vec = + eks_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); + let eks_zone_c_subnet_blocks_public: Vec = + eks_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); + + context.insert("eks_zone_a_subnet_blocks_public", &eks_zone_a_subnet_blocks_public); + context.insert("eks_zone_b_subnet_blocks_public", &eks_zone_b_subnet_blocks_public); + context.insert("eks_zone_c_subnet_blocks_public", &eks_zone_c_subnet_blocks_public); + } + VpcQoveryNetworkMode::WithoutNatGateways => {} + }; + + context.insert("vpc_qovery_network_mode", &options.vpc_qovery_network_mode.to_string()); + + let rds_zone_a_subnet_blocks = format_ips(&options.rds_zone_a_subnet_blocks); + let rds_zone_b_subnet_blocks = format_ips(&options.rds_zone_b_subnet_blocks); + let rds_zone_c_subnet_blocks = format_ips(&options.rds_zone_c_subnet_blocks); + + let documentdb_zone_a_subnet_blocks = format_ips(&options.documentdb_zone_a_subnet_blocks); + let documentdb_zone_b_subnet_blocks = format_ips(&options.documentdb_zone_b_subnet_blocks); + let documentdb_zone_c_subnet_blocks = format_ips(&options.documentdb_zone_c_subnet_blocks); + + let elasticache_zone_a_subnet_blocks = format_ips(&options.elasticache_zone_a_subnet_blocks); + let elasticache_zone_b_subnet_blocks = format_ips(&options.elasticache_zone_b_subnet_blocks); + let elasticache_zone_c_subnet_blocks = format_ips(&options.elasticache_zone_c_subnet_blocks); + + let elasticsearch_zone_a_subnet_blocks = format_ips(&options.elasticsearch_zone_a_subnet_blocks); + let elasticsearch_zone_b_subnet_blocks = format_ips(&options.elasticsearch_zone_b_subnet_blocks); + let elasticsearch_zone_c_subnet_blocks = format_ips(&options.elasticsearch_zone_c_subnet_blocks); + + let region_cluster_id = 
format!("{}-{}", kubernetes.region(), kubernetes.id()); + let vpc_cidr_block = options.vpc_cidr_block.clone(); + let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", kubernetes.id()); + let eks_cidr_subnet = options.eks_cidr_subnet.clone(); + + let eks_access_cidr_blocks = format_ips(&options.eks_access_cidr_blocks); + + let qovery_api_url = options.qovery_api_url.clone(); + let rds_cidr_subnet = options.rds_cidr_subnet.clone(); + let documentdb_cidr_subnet = options.documentdb_cidr_subnet.clone(); + let elasticache_cidr_subnet = options.elasticache_cidr_subnet.clone(); + let elasticsearch_cidr_subnet = options.elasticsearch_cidr_subnet.clone(); + + // Qovery + context.insert("organization_id", kubernetes.cloud_provider().organization_id()); + context.insert("qovery_api_url", &qovery_api_url); + + context.insert("engine_version_controller_token", &options.engine_version_controller_token); + context.insert("agent_version_controller_token", &options.agent_version_controller_token); + + context.insert("test_cluster", &kubernetes.context().is_test_cluster()); + + if let Some(resource_expiration_in_seconds) = kubernetes.context().resource_expiration_in_seconds() { + context.insert("resource_expiration_in_seconds", &resource_expiration_in_seconds); + } + + context.insert("force_upgrade", &kubernetes.context().requires_forced_upgrade()); + + // Qovery features + context.insert( + "log_history_enabled", + &kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ); + context.insert( + "metrics_history_enabled", + &kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + ); + + // DNS configuration + let managed_dns_list = vec![kubernetes.dns_provider().name()]; + let managed_dns_domains_helm_format = vec![kubernetes.dns_provider().domain().to_string()]; + let managed_dns_domains_root_helm_format = vec![kubernetes.dns_provider().domain().root_domain().to_string()]; + let managed_dns_domains_terraform_format = + 
terraform_list_format(vec![kubernetes.dns_provider().domain().to_string()]); + let managed_dns_domains_root_terraform_format = + terraform_list_format(vec![kubernetes.dns_provider().domain().root_domain().to_string()]); + let managed_dns_resolvers_terraform_format = managed_dns_resolvers_terraform_format(kubernetes.dns_provider()); + + context.insert("managed_dns", &managed_dns_list); + context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); + context.insert( + "managed_dns_domains_root_terraform_format", + &managed_dns_domains_root_terraform_format, + ); + context.insert( + "managed_dns_resolvers_terraform_format", + &managed_dns_resolvers_terraform_format, + ); + + match kubernetes.dns_provider().kind() { + dns_provider::Kind::Cloudflare => { + context.insert("external_dns_provider", kubernetes.dns_provider().provider_name()); + context.insert("cloudflare_api_token", kubernetes.dns_provider().token()); + context.insert("cloudflare_email", kubernetes.dns_provider().account()); + } + }; + + context.insert("dns_email_report", &options.tls_email_report); + + // TLS + context.insert("acme_server_url", &lets_encrypt_url(kubernetes.context())); + + // Vault + context.insert("vault_auth_method", "none"); + + if env::var_os("VAULT_ADDR").is_some() { + // select the correct used method + match env::var_os("VAULT_ROLE_ID") { + Some(role_id) => { + context.insert("vault_auth_method", "app_role"); + context.insert("vault_role_id", role_id.to_str().unwrap()); + + match env::var_os("VAULT_SECRET_ID") { + Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), + None => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable(event_details, "VAULT_SECRET_ID".to_string()), + None, + )), + } + } + 
None => { + if env::var_os("VAULT_TOKEN").is_some() { + context.insert("vault_auth_method", "token") + } + } + } + }; + + // Other Kubernetes + context.insert("kubernetes_cluster_name", &kubernetes.cluster_name()); + context.insert("enable_cluster_autoscaler", &true); + + // AWS + context.insert("aws_access_key", &kubernetes.cloud_provider().access_key_id()); + context.insert("aws_secret_key", &kubernetes.cloud_provider().secret_access_key()); + + // AWS S3 tfstate storage + context.insert( + "aws_access_key_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .access_key_id + .as_str(), + ); + + context.insert( + "aws_secret_key_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .secret_access_key + .as_str(), + ); + context.insert( + "aws_region_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .region + .as_str(), + ); + + context.insert("aws_region", &kubernetes.region()); + context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); + context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); + context.insert("vpc_cidr_block", &vpc_cidr_block); + context.insert("vpc_custom_routing_table", &options.vpc_custom_routing_table); + context.insert("s3_kubeconfig_bucket", &format!("qovery-kubeconfigs-{}", kubernetes.id())); + + // AWS - EKS + context.insert("aws_availability_zones", &aws_zones); + context.insert("eks_cidr_subnet", &eks_cidr_subnet); + context.insert("kubernetes_cluster_name", kubernetes.name()); + context.insert("kubernetes_cluster_id", kubernetes.id()); + context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); + context.insert("eks_region_cluster_id", region_cluster_id.as_str()); + context.insert("eks_worker_nodes", &node_groups); // FIXME + context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); + context.insert("eks_zone_b_subnet_blocks_private", 
&eks_zone_b_subnet_blocks_private); + context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); + context.insert("eks_masters_version", &kubernetes.version()); + context.insert("eks_workers_version", &kubernetes.version()); + context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); + context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); + + // AWS - RDS + context.insert("rds_cidr_subnet", &rds_cidr_subnet); + context.insert("rds_zone_a_subnet_blocks", &rds_zone_a_subnet_blocks); + context.insert("rds_zone_b_subnet_blocks", &rds_zone_b_subnet_blocks); + context.insert("rds_zone_c_subnet_blocks", &rds_zone_c_subnet_blocks); + + // AWS - DocumentDB + context.insert("documentdb_cidr_subnet", &documentdb_cidr_subnet); + context.insert("documentdb_zone_a_subnet_blocks", &documentdb_zone_a_subnet_blocks); + context.insert("documentdb_zone_b_subnet_blocks", &documentdb_zone_b_subnet_blocks); + context.insert("documentdb_zone_c_subnet_blocks", &documentdb_zone_c_subnet_blocks); + + // AWS - Elasticache + context.insert("elasticache_cidr_subnet", &elasticache_cidr_subnet); + context.insert("elasticache_zone_a_subnet_blocks", &elasticache_zone_a_subnet_blocks); + context.insert("elasticache_zone_b_subnet_blocks", &elasticache_zone_b_subnet_blocks); + context.insert("elasticache_zone_c_subnet_blocks", &elasticache_zone_c_subnet_blocks); + + // AWS - Elasticsearch + context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); + context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); + context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); + context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); + + // grafana credentials + context.insert("grafana_admin_user", options.grafana_admin_user.as_str()); + context.insert("grafana_admin_password", options.grafana_admin_password.as_str()); + + // qovery + 
context.insert("qovery_api_url", options.qovery_api_url.as_str()); + context.insert("qovery_nats_url", options.qovery_nats_url.as_str()); + context.insert("qovery_nats_user", options.qovery_nats_user.as_str()); + context.insert("qovery_nats_password", options.qovery_nats_password.as_str()); + context.insert("qovery_ssh_key", options.qovery_ssh_key.as_str()); + context.insert("discord_api_key", options.discord_api_key.as_str()); + + Ok(context) +} + +fn create( + kubernetes: &dyn Kubernetes, + kubernetes_long_id: uuid::Uuid, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), + )); + + kubernetes.send_to_customer( + format!( + "Preparing {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + // upgrade cluster instead if required + match kubernetes.get_kubeconfig_file() { + Ok((path, _)) => match is_kubernetes_upgrade_required( + path, + kubernetes.version(), + kubernetes.cloud_provider().credentials_environment_variables(), + event_details.clone(), + kubernetes.logger(), + ) { + Ok(x) => { + if x.required_upgrade_on.is_some() { + return kubernetes.upgrade_with_status(x); + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) + } + Err(e) => { + kubernetes.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), + )))); + } + }, + Err(_) => 
kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + + }; + + // create AWS IAM roles + let already_created_roles = get_default_roles_to_create(); + for role in already_created_roles { + match role.create_service_linked_role( + kubernetes.cloud_provider().access_key_id().as_str(), + kubernetes.cloud_provider().secret_access_key().as_str(), + ) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Role {} is already present, no need to create", role.role_name)), + )), + Err(e) => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), + None, + )), + } + } + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
+ // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Deploying {} cluster.", kubernetes.kind())), + )); + + kubernetes.send_to_customer( + format!( + "Deploying {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + // temporary: remove helm/kube management from terraform + match terraform_init_validate_state_list(temp_dir.as_str()) { + Ok(x) => { + let items_type = vec!["helm_release", "kubernetes_namespace"]; + for item in items_type { + for entry in x.clone() { + if entry.starts_with(item) { + match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), + Err(e) => { + return Err(EngineError::new_terraform_cannot_remove_entry_out( + event_details, + entry.to_string(), + e, + )) + } + } + }; + } + } + } + Err(e) => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), + }; + + // terraform deployment dedicated to cloud resources + if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), kubernetes.context().is_dry_run_deploy()) { + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + + // 
kubernetes helm deployments on the cluster + // todo: instead of downloading kubeconfig file, use the one that has just been generated by terraform + let kubeconfig_path = kubernetes.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(&kubeconfig_path); + + let credentials_environment_variables: Vec<(String, String)> = kubernetes + .cloud_provider() + .credentials_environment_variables() + .into_iter() + .map(|x| (x.0.to_string(), x.1.to_string())) + .collect(); + + let charts_prerequisites = ChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(kubernetes.dns_provider()), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: 
kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); + + let helm_charts_to_deploy = aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; + + deploy_charts_levels( + kubeconfig_path, + &credentials_environment_variables, + helm_charts_to_deploy, + kubernetes.context().is_dry_run_deploy(), + ) + .map_err(|e| EngineError::new_helm_charts_deploy_error(event_details.clone(), e)) +} + +fn create_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + let (kubeconfig_path, _) = kubernetes.get_kubeconfig_file()?; + let environment_variables = kubernetes.cloud_provider().credentials_environment_variables(); + + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe(format!("{}.create_error() called.", kubernetes.kind())), + )); + + match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { + Ok(ok_line) => kubernetes + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), + Err(err) => kubernetes.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new( + "Error trying to get kubernetes events".to_string(), + Some(err.message(ErrorMessageVerbosity::FullDetails)), + ), + )), + }; + + Ok(()) +} + +fn upgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + 
kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe(format!("{}.upgrade_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn downgrade() -> Result<(), EngineError> { + Ok(()) +} + +fn downgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe(format!("{}.downgrade_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn pause( + kubernetes: &dyn Kubernetes, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + + kubernetes.send_to_customer( + format!( + "Preparing {} {} cluster pause with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + kubernetes.logger().log(EngineEvent::Info( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe(format!("Preparing {} cluster pause.", kubernetes.kind())), + )); + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let mut context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + // pause: remove all worker nodes to reduce the bill but keep master to keep all the deployment config, certificates etc... 
+ let worker_nodes: Vec = Vec::new(); + context.insert("eks_worker_nodes", &worker_nodes); + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + // pause: only select terraform workers elements to pause to avoid applying on the whole config + // this to avoid failures because of helm deployments on removing workers nodes + let tf_workers_resources = match terraform_init_validate_state_list(temp_dir.as_str()) { + Ok(x) => { + let mut tf_workers_resources_name = Vec::new(); + for name in x { + if name.starts_with("aws_eks_node_group.") { + tf_workers_resources_name.push(name); + } + } + tf_workers_resources_name + } + Err(e) => { + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); + kubernetes.logger().log(EngineEvent::Error(error.clone(), None)); + return Err(error); + } + }; + + if tf_workers_resources.is_empty() { + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); + } + + let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; + + // pause: wait 1h for the engine to have 0 running jobs before pausing and 
avoid getting unreleased lock (from helm or terraform for example) + if options.qovery_engine_location == EngineLocation::ClientSide { + match kubernetes.context().is_feature_enabled(&Features::MetricsHistory) { + true => { + let metric_name = "taskmanager_nb_running_tasks"; + let wait_engine_job_finish = retry::retry(Fixed::from_millis(60000).take(60), || { + return match kubectl_exec_api_custom_metrics( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + "qovery", + None, + metric_name, + ) { + Ok(metrics) => { + let mut current_engine_jobs = 0; + + for metric in metrics.items { + match metric.value.parse::() { + Ok(job_count) if job_count > 0 => current_engine_jobs += 1, + Err(e) => { + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics( + event_details.clone(), + CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); + } + _ => {} + } + } + + if current_engine_jobs == 0 { + OperationResult::Ok(()) + } else { + OperationResult::Retry(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), None)) + } + } + Err(e) => { + OperationResult::Retry( + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), e)) + } + }; + }); + + match wait_engine_job_finish { + Ok(_) => { + kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + } + Err(Operation { error, .. }) => { + return Err(error) + } + Err(retry::Error::Internal(msg)) => { + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) + } + } + } + false => kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + } + } + + let mut terraform_args_string = vec!["apply".to_string(), "-auto-approve".to_string()]; + for x in tf_workers_resources { + terraform_args_string.push(format!("-target={}", x)); + } + let terraform_args = terraform_args_string.iter().map(|x| &**x).collect(); + + kubernetes.send_to_customer( + format!( + "Pausing {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), + )); + + match terraform_exec(temp_dir.as_str(), terraform_args) { + Ok(_) => { + let message = format!("Kubernetes cluster {} successfully paused", kubernetes.name()); + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); + Ok(()) + } + Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), + } +} + +fn pause_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe(format!("{}.pause_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn delete( + kubernetes: &dyn Kubernetes, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + let mut skip_kubernetes_step = false; + + kubernetes.send_to_customer( + format!( + "Preparing to delete {} cluster {} with id {}", + kubernetes.kind(), + kubernetes.name(), 
+ kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Preparing to delete {} cluster.", kubernetes.kind())), + )); + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() { + Ok(x) => x, + Err(e) => { + let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(safe_message.to_string(), Some(e.message(ErrorMessageVerbosity::FullDetails))), + )); + + skip_kubernetes_step = true; + "".to_string() + } + }; + + // should apply before destroy to be sure destroy will compute on all resources + // don't exit on failure, it can happen if we resume a destroy process + let message = format!( + 
"Ensuring everything is up to date before deleting cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); + + if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { + // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy + kubernetes.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); + }; + + if !skip_kubernetes_step { + // should make the diff between all namespaces and qovery managed namespaces + let message = format!( + "Deleting all non-Qovery deployed applications and dependencies for cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); + + kubernetes.send_to_customer(&message, &listeners_helper); + + let all_namespaces = kubectl_exec_get_all_namespaces( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + ); + + match all_namespaces { + Ok(namespace_vec) => { + let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); + let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); + + for namespace_to_delete in namespaces_to_delete.iter() { + match cmd::kubectl::kubectl_exec_delete_namespace( + &kubernetes_config_file_path, + namespace_to_delete, + 
kubernetes.cloud_provider().credentials_environment_variables(), + ) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), + Err(e) => { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); + } + } + } + } + } + Err(e) => { + let message_safe = format!( + "Error while getting all namespaces for Kubernetes cluster {}", + kubernetes.name_with_id(), + ); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), + )); + } + } + + let message = format!( + "Deleting all Qovery deployed elements and associated dependencies for cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.send_to_customer(&message, &listeners_helper); + + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + // delete custom metrics api to avoid stale namespaces on deletion + let helm = Helm::new( + &kubernetes_config_file_path, + &kubernetes.cloud_provider().credentials_environment_variables(), + ) + .map_err(|e| to_engine_error(&event_details, e))?; + let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); + helm.uninstall(&chart, &[]) + .map_err(|e| to_engine_error(&event_details, e))?; + + // required to avoid namespace stuck on deletion + uninstall_cert_manager( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + event_details.clone(), + kubernetes.logger(), + )?; + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm 
charts".to_string()), + )); + + let qovery_namespaces = get_qovery_managed_namespaces(); + for qovery_namespace in qovery_namespaces.iter() { + let charts_to_delete = helm + .list_release(Some(qovery_namespace), &[]) + .map_err(|e| to_engine_error(&event_details, e))?; + + for chart in charts_to_delete { + let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); + match helm.uninstall(&chart_info, &[]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), + Err(e) => { + let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) + } + } + } + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); + + for qovery_namespace in qovery_namespaces.iter() { + let deletion = cmd::kubectl::kubectl_exec_delete_namespace( + &kubernetes_config_file_path, + qovery_namespace, + kubernetes.cloud_provider().credentials_environment_variables(), + ); + match deletion { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), + Err(e) => { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) + } + } + } + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); + + match helm.list_release(None, &[]) { + Ok(helm_charts) => { + for chart in helm_charts { + let chart_info = 
ChartInfo::new_from_release_name(&chart.name, &chart.namespace); + match helm.uninstall(&chart_info, &[]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), + Err(e) => { + let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) + } + } + } + } + Err(e) => { + let message_safe = "Unable to get helm list"; + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) + } + } + }; + + let message = format!("Deleting Kubernetes cluster {}/{}", kubernetes.name(), kubernetes.id()); + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); + + match retry::retry( + Fibonacci::from_millis(60000).take(3), + || match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { + Ok(_) => OperationResult::Ok(()), + Err(e) => OperationResult::Retry(e), + }, + ) { + Ok(_) => { + kubernetes.send_to_customer( + format!( + "Kubernetes cluster {}/{} successfully deleted", + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + kubernetes.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); + Ok(()) + } + Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + error, + )), + Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + CommandError::new("Error while trying to perform Terraform destroy".to_string(), Some(msg), None), + )), + } +} + +fn delete_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe(format!("{}.delete_error() called.", kubernetes.kind())), + )); + + Ok(()) +} diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 36fb944c..8a692f45 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -5,6 +5,7 @@ use const_format::formatcp; use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; use qovery_engine::cloud_provider::Kind::Aws; @@ -60,16 +61,19 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> &context, logger, AWS_TEST_REGION.to_string().as_str(), + KKind::Eks, AWS_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, ) } + impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -84,10 +88,11 @@ impl Cluster for AWS { let cloud_provider: Arc> = Arc::new(AWS::cloud_provider(context)); let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); - let k = get_environment_test_kubernetes( + let kubernetes = 
get_environment_test_kubernetes( Aws, context, cloud_provider.clone(), + kubernetes_kind, dns_provider.clone(), logger.clone(), localisation, @@ -101,7 +106,7 @@ impl Cluster for AWS { container_registry, cloud_provider, dns_provider, - k, + kubernetes, ) } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 7c2b440f..2b639604 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -20,17 +20,18 @@ use crate::utilities::{ FuncTestsSecrets, }; use base64; -use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; +use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EC2, EKS}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; use qovery_engine::cloud_provider::environment::Environment; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::{CloudProvider, Edge, Kind}; +use qovery_engine::cloud_provider::{CloudProvider, Kind}; use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; @@ -62,6 +63,7 @@ pub trait Cluster { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -1136,14 +1138,14 @@ pub fn test_db( Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), Kind::Scw => (SCW_TEST_ZONE.to_string(), 
SCW_KUBERNETES_VERSION.to_string()), - Kind::Edge(Edge::Aws) => (AWS_TEST_REGION.to_string(), AWS_K3S_VERSION.to_string()), }; let engine_config = match provider_kind { - Kind::Aws | Kind::Edge(Edge::Aws) => AWS::docker_cr_engine( + Kind::Aws => AWS::docker_cr_engine( &context, logger.clone(), localisation.as_str(), + KKind::Eks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1152,6 +1154,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), + KKind::Doks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1160,6 +1163,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), + KKind::ScwKapsule, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1224,6 +1228,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::Eks, kubernetes_version, &ClusterDomain::Default, None, @@ -1232,6 +1237,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::Doks, kubernetes_version, &ClusterDomain::Default, None, @@ -1240,6 +1246,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::ScwKapsule, kubernetes_version, &ClusterDomain::Default, None, @@ -1256,6 +1263,7 @@ pub fn get_environment_test_kubernetes<'a>( provider_kind: Kind, context: &Context, cloud_provider: Arc>, + kubernetes_kind: KKind, dns_provider: Arc>, logger: Box, localisation: &str, @@ -1263,16 +1271,16 @@ pub fn get_environment_test_kubernetes<'a>( vpc_network_mode: Option, ) -> Box { let secrets = FuncTestsSecrets::new(); - let k: Box; - match provider_kind { - Kind::Aws => { + let kubernetes: Box = match kubernetes_kind { + KKind::Eks => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); if vpc_network_mode.is_some() { options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); } - k = Box::new( + + Box::new( EKS::new( 
context.clone(), context.cluster_id(), @@ -1288,11 +1296,35 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - Kind::Do => { + KKind::Ec2 => { + let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); + let mut options = AWS::kubernetes_cluster_options(secrets, None); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } + + Box::new( + EC2::new( + context.clone(), + context.cluster_id(), + uuid::Uuid::new_v4(), + format!("qovery-{}", context.cluster_id()).as_str(), + kubernetes_version, + region.clone(), + region.get_zones_to_string(), + cloud_provider, + dns_provider, + options, + logger, + ) + .unwrap(), + ) + } + KKind::Doks => { let region = DoRegion::from_str(localisation).expect("DO region not supported"); - k = Box::new( + Box::new( DOKS::new( context.clone(), context.cluster_id().to_string(), @@ -1307,11 +1339,11 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - Kind::Scw => { + KKind::ScwKapsule => { let zone = ScwZone::from_str(localisation).expect("SCW zone not supported"); - k = Box::new( + Box::new( Kapsule::new( context.clone(), context.cluster_id().to_string(), @@ -1326,11 +1358,11 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - } + }; - return k; + return kubernetes; } pub fn get_cluster_test_kubernetes<'a>( @@ -1419,6 +1451,7 @@ pub fn get_cluster_test_kubernetes<'a>( pub fn cluster_test( test_name: &str, provider_kind: Kind, + kubernetes_kind: KKind, context: Context, logger: Box, localisation: &str, @@ -1441,6 +1474,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1449,6 +1483,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1457,6 +1492,7 @@ pub fn cluster_test( 
&context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1547,6 +1583,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::Eks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1555,6 +1592,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::Doks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1563,6 +1601,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::ScwKapsule, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 36a5db93..e81e336f 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -3,6 +3,7 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; use qovery_engine::cloud_provider::digitalocean::DO; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; @@ -13,6 +14,7 @@ use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; +use qovery_engine::cloud_provider::kubernetes::Kind; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; @@ -48,6 +50,7 @@ pub fn do_default_engine_config(context: &Context, logger: Box) -> E &context, logger, DO_TEST_REGION.to_string().as_str(), + KKind::Doks, DO_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, 
None, @@ -59,6 +62,7 @@ impl Cluster for DO { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -76,6 +80,7 @@ impl Cluster for DO { Do, context, cloud_provider.clone(), + kubernetes_kind, dns_provider.clone(), logger.clone(), localisation, diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index d3c570bf..4db9b9ff 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -1,28 +1,31 @@ -use const_format::formatcp; -use qovery_engine::build_platform::Build; -use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; -use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; -use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; -use qovery_engine::engine::EngineConfig; -use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; -use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; -use crate::cloudflare::dns_provider_cloudflare; -use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; +use const_format::formatcp; +use tracing::error; -use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use qovery_engine::build_platform::Build; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use qovery_engine::cloud_provider::kubernetes::Kind; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; +use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; +use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::Kind::Scw; +use 
qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::errors::ContainerRegistryError; +use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::engine::EngineConfig; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::logger::Logger; use qovery_engine::models::scaleway::ScwZone; -use tracing::error; +use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; + +use crate::cloudflare::dns_provider_cloudflare; +use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; pub const SCW_KUBERNETES_MAJOR_VERSION: u8 = 1; @@ -69,6 +72,7 @@ pub fn scw_default_engine_config(context: &Context, logger: Box) -> &context, logger, SCW_TEST_ZONE.to_string().as_str(), + KKind::ScwKapsule, SCW_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, @@ -80,6 +84,7 @@ impl Cluster for Scaleway { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -98,6 +103,7 @@ impl Cluster for Scaleway { Scw, context, cloud_provider.clone(), + Kind::ScwKapsule, dns_provider.clone(), logger.clone(), localisation, diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index 53f790b8..e403f598 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -1,14 +1,18 @@ extern crate test_utilities; -use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +use 
std::str::FromStr; + use ::function_name::named; +use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; -use std::str::FromStr; -use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + +use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; #[cfg(feature = "test-aws-infra")] fn create_and_destroy_eks_cluster( @@ -25,6 +29,7 @@ fn create_and_destroy_eks_cluster( cluster_test( test_name, Kind::Aws, + KKind::Eks, context( generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str(), diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index 9dbf76d3..754072bc 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -1,6 +1,7 @@ use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::WithNatGateways; use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; use test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; @@ -38,6 +39,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { cluster_test( function_name!(), Kind::Aws, + KKind::Eks, context.clone(), logger(), region, diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/edge/aws/edge_aws_kubernetes.rs index fbfdacb4..a46c9368 100644 --- a/tests/edge/aws/edge_aws_kubernetes.rs +++ 
b/tests/edge/aws/edge_aws_kubernetes.rs @@ -3,6 +3,8 @@ extern crate test_utilities; use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; + use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; @@ -27,6 +29,7 @@ fn create_and_destroy_edge_aws_cluster( cluster_test( test_name, Kind::Aws, + KKind::Ec2, context( generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str(), diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 952cc24d..5f939656 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -4,6 +4,8 @@ use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETE use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; + use qovery_engine::cloud_provider::Kind; use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; @@ -21,6 +23,7 @@ fn create_and_destroy_kapsule_cluster( cluster_test( test_name, Kind::Scw, + KKind::ScwKapsule, context(generate_id().as_str(), generate_cluster_id(zone.as_str()).as_str()), logger(), zone.as_str(), diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index bef7262d..f50f2d96 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,7 +1,9 @@ use 
::function_name::named; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -32,6 +34,7 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { cluster_test( function_name!(), Kind::Scw, + KKind::ScwKapsule, context.clone(), logger, zone.as_str(), From 1a2c96b122fd9443fc5e7d5155853ab93568328e Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:13:57 +0200 Subject: [PATCH 050/122] wip: fix multi kubernetes provider tests --- test_utilities/src/common.rs | 109 +++++++++++++++++++++-------------- 1 file changed, 65 insertions(+), 44 deletions(-) diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 2b639604..8a8e857e 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -1375,21 +1375,21 @@ pub fn get_cluster_test_kubernetes<'a>( localisation: &str, aws_zones: Option>, cloud_provider: Arc>, + kubernetes_provider: KKind, dns_provider: Arc>, vpc_network_mode: Option, logger: Box, ) -> Box { - let k: Box; - - match provider_kind { - Kind::Aws => { + let kubernetes: Box = match kubernetes_provider { + KKind::Eks => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); if vpc_network_mode.is_some() { options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); - k = Box::new( + + Box::new( EKS::new( context.clone(), cluster_id.as_str(), @@ -1405,47 +1405,68 @@ pub fn get_cluster_test_kubernetes<'a>( 
logger, ) .unwrap(), - ); + ) } - Kind::Do => { - k = Box::new( - DOKS::new( - context.clone(), - cluster_id.clone(), - uuid::Uuid::new_v4(), - cluster_name.clone(), - boot_version, - DoRegion::from_str(localisation.clone()).expect("Unknown region set for DOKS"), - cloud_provider, - dns_provider, - DO::kubernetes_nodes(), - DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), - logger, - ) - .unwrap(), - ); - } - Kind::Scw => { - k = Box::new( - Kapsule::new( - context.clone(), - cluster_id.clone(), - uuid::Uuid::new_v4(), - cluster_name.clone(), - boot_version, - ScwZone::from_str(localisation.clone()).expect("Unknown zone set for Kapsule"), - cloud_provider, - dns_provider, - Scaleway::kubernetes_nodes(), - Scaleway::kubernetes_cluster_options(secrets, None), - logger, - ) - .unwrap(), - ); - } - } + KKind::Ec2 => { + let mut options = AWS::kubernetes_cluster_options(secrets, None); + let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } + let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); - return k; + Box::new( + EC2::new( + context.clone(), + cluster_id.as_str(), + uuid::Uuid::new_v4(), + cluster_name.as_str(), + boot_version.as_str(), + aws_region.clone(), + aws_zones, + cloud_provider, + dns_provider, + options, + logger, + ) + .unwrap(), + ) + } + KKind::Doks => Box::new( + DOKS::new( + context.clone(), + cluster_id.clone(), + uuid::Uuid::new_v4(), + cluster_name.clone(), + boot_version, + DoRegion::from_str(localisation.clone()).expect("Unknown region set for DOKS"), + cloud_provider, + dns_provider, + DO::kubernetes_nodes(), + DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), + logger, + ) + .unwrap(), + ), + KKind::ScwKapsule => Box::new( + Kapsule::new( + context.clone(), + cluster_id.clone(), + uuid::Uuid::new_v4(), + 
cluster_name.clone(), + boot_version, + ScwZone::from_str(localisation.clone()).expect("Unknown zone set for Kapsule"), + cloud_provider, + dns_provider, + Scaleway::kubernetes_nodes(), + Scaleway::kubernetes_cluster_options(secrets, None), + logger, + ) + .unwrap(), + ), + }; + + return kubernetes; } pub fn cluster_test( From 79b7bea8af4996b194816b0761e7c1f28e158879 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:19:46 +0200 Subject: [PATCH 051/122] wip: fix clippy errors --- src/cloud_provider/aws/kubernetes/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 15eff3e6..e65ed05d 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -169,7 +169,7 @@ impl EKS { } } - let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + let s3 = s3(&context, ®ion, &**cloud_provider); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -580,7 +580,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || downgrade()) + send_progress_on_long_task(self, Action::Create, downgrade) } #[named] @@ -799,7 +799,7 @@ impl EC2 { let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; - let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + let s3 = s3(&context, ®ion, &**cloud_provider); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -964,7 +964,7 @@ impl Kubernetes for EC2 { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || downgrade()) + send_progress_on_long_task(self, Action::Create, downgrade) } #[named] @@ -1178,7 +1178,7 @@ fn aws_zones( Ok(aws_zones) } -fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box) -> S3 { +fn 
s3(context: &Context, region: &AwsRegion, cloud_provider: &dyn CloudProvider) -> S3 { S3::new( context.clone(), "s3-temp-id".to_string(), @@ -1229,7 +1229,7 @@ fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> Str fn tera_context( kubernetes: &dyn Kubernetes, zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); From d603c049200495b63cdd21939f8d634672561c00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 22 Apr 2022 09:27:45 +0200 Subject: [PATCH 052/122] Fix: revert clusterId to be a short id (#697) - Managed Databases use the clusterId in order find back the VPC --- lib/aws/bootstrap/eks-master-cluster.j2.tf | 3 ++- lib/digitalocean/bootstrap/ks-locals.j2.tf | 3 ++- lib/scaleway/bootstrap/ks-locals.j2.tf | 3 ++- src/cloud_provider/aws/kubernetes/mod.rs | 2 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 2 +- 5 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf index 32b119a3..8a480138 100644 --- a/lib/aws/bootstrap/eks-master-cluster.j2.tf +++ b/lib/aws/bootstrap/eks-master-cluster.j2.tf @@ -6,7 +6,8 @@ locals { locals { tags_common = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = var.kubernetes_cluster_id + ClusterLongId = var.kubernetes_full_cluster_id OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 diff --git a/lib/digitalocean/bootstrap/ks-locals.j2.tf b/lib/digitalocean/bootstrap/ks-locals.j2.tf index dec6438f..889b5c3e 100644 --- a/lib/digitalocean/bootstrap/ks-locals.j2.tf +++ b/lib/digitalocean/bootstrap/ks-locals.j2.tf @@ -1,6 +1,7 @@ locals { tags_ks = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = var.kubernetes_cluster_id + ClusterLongId = 
var.kubernetes_full_cluster_id OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 diff --git a/lib/scaleway/bootstrap/ks-locals.j2.tf b/lib/scaleway/bootstrap/ks-locals.j2.tf index 71da18e8..b33de6a0 100644 --- a/lib/scaleway/bootstrap/ks-locals.j2.tf +++ b/lib/scaleway/bootstrap/ks-locals.j2.tf @@ -1,6 +1,7 @@ locals { tags_ks = { - ClusterId = var.kubernetes_full_cluster_id + ClusterId = var.kubernetes_cluster_id + ClusterLongId = var.kubernetes_full_cluster_id OrganizationId = var.organization_id, Region = var.region creationDate = time_static.on_cluster_create.rfc3339 diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 5b6936f5..89816f5b 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -498,7 +498,7 @@ impl EKS { context.insert("eks_cidr_subnet", &eks_cidr_subnet); context.insert("kubernetes_cluster_name", &self.name()); context.insert("kubernetes_cluster_id", self.id()); - context.insert("kubernetes_full_cluster_id", self.context.cluster_id()); + context.insert("kubernetes_full_cluster_id", &self.long_id); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_worker_nodes", &self.nodes_groups); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index bfc48a0e..b8d9e693 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -468,7 +468,7 @@ impl Kapsule { // Kubernetes context.insert("test_cluster", &self.context.is_test_cluster()); - context.insert("kubernetes_full_cluster_id", self.context().cluster_id()); + context.insert("kubernetes_full_cluster_id", &self.long_id); context.insert("kubernetes_cluster_id", self.id()); context.insert("kubernetes_cluster_name", 
self.cluster_name().as_str()); context.insert("kubernetes_cluster_version", self.version()); From 6c06fd7e54ce0d3729a8ef36f39f01d022cf4cc9 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 10:48:48 +0200 Subject: [PATCH 053/122] wip: add ec2 subnets --- .../backend.j2.tf | 0 .../documentdb.tf | 0 .../eks-vpc-common.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 0 .../elasticcache.tf | 0 .../elasticsearch.tf | 0 .../qovery-vault.j2.tf | 0 lib/aws/{bootstrap => bootstrap-ec2}/rds.tf | 0 .../s3-qovery-buckets.tf | 0 .../tf-default-vars.j2.tf | 0 .../tf-providers-aws.j2.tf | 0 .../{bootstrap => bootstrap-eks}/README.md | 0 lib/aws/bootstrap-eks/backend.j2.tf | 10 + .../chart_values/external-dns.j2.yaml | 0 .../chart_values/grafana.j2.yaml | 0 .../chart_values/kube-prometheus-stack.yaml | 0 .../chart_values/loki.yaml | 0 .../chart_values/metrics-server.yaml | 0 .../chart_values/nginx-ingress.yaml | 0 .../chart_values/pleco.yaml | 0 .../charts/aws-calico/.helmignore | 0 .../charts/aws-calico/Chart.yaml | 0 .../charts/aws-calico/README.md | 0 .../charts/aws-calico/crds/crds.yaml | 0 .../charts/aws-calico/templates/_helpers.tpl | 0 .../aws-calico/templates/config-map.yaml | 0 .../aws-calico/templates/daemon-set.yaml | 0 .../aws-calico/templates/deployment.yaml | 0 .../templates/pod-disruption-budget.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../charts/aws-calico/templates/rbac.yaml | 0 .../templates/service-accounts.yaml | 0 .../charts/aws-calico/templates/service.yaml | 0 .../charts/aws-calico/values.yaml | 0 .../charts/aws-limits-exporter/.helmignore | 0 .../charts/aws-limits-exporter/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../templates/secrets.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../charts/aws-limits-exporter/values.yaml | 0 .../aws-node-termination-handler/.helmignore | 0 .../aws-node-termination-handler/Chart.yaml | 
0 .../aws-node-termination-handler/README.md | 0 .../templates/_helpers.tpl | 0 .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/daemonset.yaml | 0 .../templates/psp.yaml | 0 .../templates/serviceaccount.yaml | 0 .../aws-node-termination-handler/values.yaml | 0 .../charts/aws-ui-view/.helmignore | 0 .../charts/aws-ui-view/Chart.yaml | 0 .../charts/aws-ui-view/templates/_helpers.tpl | 0 .../aws-ui-view/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../charts/aws-ui-view/values.yaml | 0 .../charts/aws-vpc-cni/.helmignore | 0 .../charts/aws-vpc-cni/Chart.yaml | 0 .../charts/aws-vpc-cni/README.md | 0 .../charts/aws-vpc-cni/templates/_helpers.tpl | 0 .../aws-vpc-cni/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../aws-vpc-cni/templates/configmap.yaml | 0 .../templates/customresourcedefinition.yaml | 0 .../aws-vpc-cni/templates/daemonset.yaml | 0 .../aws-vpc-cni/templates/eniconfig.yaml | 0 .../aws-vpc-cni/templates/serviceaccount.yaml | 0 .../charts/aws-vpc-cni/values.yaml | 0 .../charts/coredns-config/.helmignore | 0 .../charts/coredns-config/Chart.yaml | 0 .../coredns-config/templates/_helpers.tpl | 0 .../coredns-config/templates/configmap.yml | 0 .../charts/coredns-config/values.yaml | 0 .../charts/iam-eks-user-mapper/.helmignore | 0 .../charts/iam-eks-user-mapper/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../iam-eks-user-mapper/templates/rbac.yaml | 0 .../iam-eks-user-mapper/templates/secret.yaml | 0 .../templates/serviceaccount.yaml | 0 .../charts/iam-eks-user-mapper/values.yaml | 0 .../charts/q-storageclass/.helmignore | 0 .../charts/q-storageclass/Chart.yaml | 0 .../q-storageclass/templates/_helpers.tpl | 0 .../templates/storageclass.yaml | 0 .../charts/q-storageclass/values.yaml | 0 lib/aws/bootstrap-eks/documentdb.tf | 81 +++++ .../eks-ebs-csi-driver.tf | 0 .../eks-gen-kubectl-config.j2.tf | 0 .../eks-master-cluster.j2.tf | 0 
.../eks-master-iam.tf | 0 .../eks-master-sec-group.tf | 0 .../eks-s3-kubeconfig-store.tf | 0 lib/aws/bootstrap-eks/eks-vpc-common.j2.tf | 42 +++ .../eks-vpc-with-nat-gateways.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 75 ++++ .../eks-workers-iam.tf | 0 .../eks-workers-nodes.j2.tf | 0 .../eks-workers-sec-group.tf | 0 lib/aws/bootstrap-eks/elasticcache.tf | 80 +++++ lib/aws/bootstrap-eks/elasticsearch.tf | 79 +++++ .../helm-aws-iam-eks-user-mapper.tf | 0 .../helm-cluster-autoscaler.j2.tf | 0 .../helm-grafana.j2.tf | 0 .../helm-loki.j2.tf | 0 .../helm-nginx-ingress.tf | 0 .../{bootstrap => bootstrap-eks}/helper.j2.sh | 0 .../qovery-tf-config.j2.tf | 0 lib/aws/bootstrap-eks/qovery-vault.j2.tf | 29 ++ lib/aws/bootstrap-eks/rds.tf | 118 +++++++ lib/aws/bootstrap-eks/s3-qovery-buckets.tf | 44 +++ lib/aws/bootstrap-eks/tf-default-vars.j2.tf | 319 ++++++++++++++++++ lib/aws/bootstrap-eks/tf-providers-aws.j2.tf | 60 ++++ src/cloud_provider/aws/kubernetes/mod.rs | 42 ++- test_utilities/src/aws.rs | 3 + 118 files changed, 976 insertions(+), 6 deletions(-) rename lib/aws/{bootstrap => bootstrap-ec2}/backend.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/documentdb.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/eks-vpc-common.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/eks-vpc-without-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/elasticcache.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/elasticsearch.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/qovery-vault.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/rds.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/s3-qovery-buckets.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/tf-default-vars.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/tf-providers-aws.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/README.md (100%) create mode 100644 lib/aws/bootstrap-eks/backend.j2.tf rename lib/aws/{bootstrap => 
bootstrap-eks}/chart_values/external-dns.j2.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/grafana.j2.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/kube-prometheus-stack.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/loki.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/metrics-server.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/nginx-ingress.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/pleco.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/crds/crds.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/config-map.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/daemon-set.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/pod-disruption-budget.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/podsecuritypolicy.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/rbac.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/service-accounts.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/service.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/Chart.yaml (100%) 
rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/secrets.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/service.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/servicemonitor.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/psp.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/Chart.yaml (100%) rename lib/aws/{bootstrap => 
bootstrap-eks}/charts/aws-ui-view/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/configmap.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/customresourcedefinition.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/eniconfig.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/templates/configmap.yml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/.helmignore (100%) 
rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/rbac.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/secret.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/templates/storageclass.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/values.yaml (100%) create mode 100644 lib/aws/bootstrap-eks/documentdb.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-ebs-csi-driver.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-gen-kubectl-config.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-cluster.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-iam.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-sec-group.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-s3-kubeconfig-store.tf (100%) create mode 100644 lib/aws/bootstrap-eks/eks-vpc-common.j2.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-vpc-with-nat-gateways.j2.tf (100%) create mode 100644 lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-workers-iam.tf (100%) rename lib/aws/{bootstrap => 
bootstrap-eks}/eks-workers-nodes.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-workers-sec-group.tf (100%) create mode 100644 lib/aws/bootstrap-eks/elasticcache.tf create mode 100644 lib/aws/bootstrap-eks/elasticsearch.tf rename lib/aws/{bootstrap => bootstrap-eks}/helm-aws-iam-eks-user-mapper.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-cluster-autoscaler.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-grafana.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-loki.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-nginx-ingress.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helper.j2.sh (100%) rename lib/aws/{bootstrap => bootstrap-eks}/qovery-tf-config.j2.tf (100%) create mode 100644 lib/aws/bootstrap-eks/qovery-vault.j2.tf create mode 100644 lib/aws/bootstrap-eks/rds.tf create mode 100644 lib/aws/bootstrap-eks/s3-qovery-buckets.tf create mode 100644 lib/aws/bootstrap-eks/tf-default-vars.j2.tf create mode 100644 lib/aws/bootstrap-eks/tf-providers-aws.j2.tf diff --git a/lib/aws/bootstrap/backend.j2.tf b/lib/aws/bootstrap-ec2/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap/backend.j2.tf rename to lib/aws/bootstrap-ec2/backend.j2.tf diff --git a/lib/aws/bootstrap/documentdb.tf b/lib/aws/bootstrap-ec2/documentdb.tf similarity index 100% rename from lib/aws/bootstrap/documentdb.tf rename to lib/aws/bootstrap-ec2/documentdb.tf diff --git a/lib/aws/bootstrap/eks-vpc-common.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-common.j2.tf rename to lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf diff --git a/lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf rename to lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap/elasticcache.tf 
b/lib/aws/bootstrap-ec2/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap/elasticcache.tf rename to lib/aws/bootstrap-ec2/elasticcache.tf diff --git a/lib/aws/bootstrap/elasticsearch.tf b/lib/aws/bootstrap-ec2/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap/elasticsearch.tf rename to lib/aws/bootstrap-ec2/elasticsearch.tf diff --git a/lib/aws/bootstrap/qovery-vault.j2.tf b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap/qovery-vault.j2.tf rename to lib/aws/bootstrap-ec2/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap/rds.tf b/lib/aws/bootstrap-ec2/rds.tf similarity index 100% rename from lib/aws/bootstrap/rds.tf rename to lib/aws/bootstrap-ec2/rds.tf diff --git a/lib/aws/bootstrap/s3-qovery-buckets.tf b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap/s3-qovery-buckets.tf rename to lib/aws/bootstrap-ec2/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap/tf-default-vars.j2.tf b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap/tf-default-vars.j2.tf rename to lib/aws/bootstrap-ec2/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap/tf-providers-aws.j2.tf b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap/tf-providers-aws.j2.tf rename to lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf diff --git a/lib/aws/bootstrap/README.md b/lib/aws/bootstrap-eks/README.md similarity index 100% rename from lib/aws/bootstrap/README.md rename to lib/aws/bootstrap-eks/README.md diff --git a/lib/aws/bootstrap-eks/backend.j2.tf b/lib/aws/bootstrap-eks/backend.j2.tf new file mode 100644 index 00000000..a1418800 --- /dev/null +++ b/lib/aws/bootstrap-eks/backend.j2.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + access_key = "{{ aws_access_key_tfstates_account }}" + secret_key = "{{ aws_secret_key_tfstates_account }}" + bucket = "{{ aws_terraform_backend_bucket }}" 
+ key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" + dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}" + region = "{{ aws_region_tfstates_account }}" + } +} diff --git a/lib/aws/bootstrap/chart_values/external-dns.j2.yaml b/lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/external-dns.j2.yaml rename to lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml diff --git a/lib/aws/bootstrap/chart_values/grafana.j2.yaml b/lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/grafana.j2.yaml rename to lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml diff --git a/lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml b/lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml rename to lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml diff --git a/lib/aws/bootstrap/chart_values/loki.yaml b/lib/aws/bootstrap-eks/chart_values/loki.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/loki.yaml rename to lib/aws/bootstrap-eks/chart_values/loki.yaml diff --git a/lib/aws/bootstrap/chart_values/metrics-server.yaml b/lib/aws/bootstrap-eks/chart_values/metrics-server.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/metrics-server.yaml rename to lib/aws/bootstrap-eks/chart_values/metrics-server.yaml diff --git a/lib/aws/bootstrap/chart_values/nginx-ingress.yaml b/lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/nginx-ingress.yaml rename to lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml diff --git a/lib/aws/bootstrap/chart_values/pleco.yaml b/lib/aws/bootstrap-eks/chart_values/pleco.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/pleco.yaml rename to 
lib/aws/bootstrap-eks/chart_values/pleco.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/.helmignore b/lib/aws/bootstrap-eks/charts/aws-calico/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-calico/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/README.md b/lib/aws/bootstrap-eks/charts/aws-calico/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/README.md rename to lib/aws/bootstrap-eks/charts/aws-calico/README.md diff --git a/lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml rename to 
lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/service.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/service.yaml rename to 
lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/values.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml diff --git 
a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml rename to 
lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/README.md rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml diff --git 
a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml 
b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/values.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/README.md rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl diff --git 
a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml diff --git 
a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml diff --git a/lib/aws/bootstrap/charts/coredns-config/.helmignore b/lib/aws/bootstrap-eks/charts/coredns-config/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/.helmignore rename to lib/aws/bootstrap-eks/charts/coredns-config/.helmignore diff --git a/lib/aws/bootstrap/charts/coredns-config/Chart.yaml b/lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/Chart.yaml rename to lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml diff --git a/lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml b/lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml rename to lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml diff --git a/lib/aws/bootstrap/charts/coredns-config/values.yaml b/lib/aws/bootstrap-eks/charts/coredns-config/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/values.yaml rename to 
lib/aws/bootstrap-eks/charts/coredns-config/values.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml diff 
--git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/.helmignore b/lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/.helmignore rename to lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore diff --git a/lib/aws/bootstrap/charts/q-storageclass/Chart.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/Chart.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/values.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml similarity 
index 100% rename from lib/aws/bootstrap/charts/q-storageclass/values.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml diff --git a/lib/aws/bootstrap-eks/documentdb.tf b/lib/aws/bootstrap-eks/documentdb.tf new file mode 100644 index 00000000..ea04fec0 --- /dev/null +++ b/lib/aws/bootstrap-eks/documentdb.tf @@ -0,0 +1,81 @@ +locals { + tags_documentdb = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "DocumentDB" + } + ) +} + +# Network + +resource "aws_subnet" "documentdb_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.documentdb_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.documentdb_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.documentdb_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_route_table_association" "documentdb_cluster_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + 
+resource "aws_docdb_subnet_group" "documentdb" { + description = "DocumentDB linked to ${var.kubernetes_cluster_id}" + name = "documentdb-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) + + tags = local.tags_documentdb +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "documentdb_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow DocumentDB incoming access from anywhere" + from_port = 27017 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 27017 + type = "ingress" +} diff --git a/lib/aws/bootstrap/eks-ebs-csi-driver.tf b/lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf similarity index 100% rename from lib/aws/bootstrap/eks-ebs-csi-driver.tf rename to lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf diff --git a/lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf b/lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf rename to lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap-eks/eks-master-cluster.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-master-cluster.j2.tf rename to lib/aws/bootstrap-eks/eks-master-cluster.j2.tf diff --git a/lib/aws/bootstrap/eks-master-iam.tf b/lib/aws/bootstrap-eks/eks-master-iam.tf similarity index 100% rename from lib/aws/bootstrap/eks-master-iam.tf rename to lib/aws/bootstrap-eks/eks-master-iam.tf diff --git a/lib/aws/bootstrap/eks-master-sec-group.tf b/lib/aws/bootstrap-eks/eks-master-sec-group.tf similarity index 100% rename from lib/aws/bootstrap/eks-master-sec-group.tf rename to lib/aws/bootstrap-eks/eks-master-sec-group.tf diff --git a/lib/aws/bootstrap/eks-s3-kubeconfig-store.tf b/lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf similarity index 100% rename from 
lib/aws/bootstrap/eks-s3-kubeconfig-store.tf rename to lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf b/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf new file mode 100644 index 00000000..63b91880 --- /dev/null +++ b/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf @@ -0,0 +1,42 @@ +data "aws_availability_zones" "available" {} + +locals { + tags_eks_vpc = merge( + local.tags_common, + { + Name = "qovery-eks-workers", + "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", + "kubernetes.io/role/elb" = 1, + {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} + } + ) + + tags_eks_vpc_public = merge( + local.tags_eks_vpc, + { + "Public" = "true" + } + ) + + tags_eks_vpc_private = merge( + local.tags_eks, + { + "Public" = "false" + } + ) +} + +# VPC +resource "aws_vpc" "eks" { + cidr_block = var.vpc_cidr_block + enable_dns_hostnames = true + + tags = local.tags_eks_vpc +} + +# Internet gateway +resource "aws_internet_gateway" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + tags = local.tags_eks_vpc +} \ No newline at end of file diff --git a/lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf b/lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf rename to lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf new file mode 100644 index 00000000..d0174308 --- /dev/null +++ b/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf @@ -0,0 +1,75 @@ +{% if vpc_qovery_network_mode == "WithoutNatGateways" %} +# Public subnets +resource "aws_subnet" "eks_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.eks_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.eks.id + 
map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.eks_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.eks_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_route_table" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.eks_cluster.id + } + + // todo(pmavro): add tests for it when it will be available in the SDK + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_eks_vpc +} + +resource "aws_route_table_association" "eks_cluster_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + subnet_id = aws_subnet.eks_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + subnet_id = aws_subnet.eks_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + subnet_id = aws_subnet.eks_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} +{% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap/eks-workers-iam.tf b/lib/aws/bootstrap-eks/eks-workers-iam.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-iam.tf rename to 
lib/aws/bootstrap-eks/eks-workers-iam.tf diff --git a/lib/aws/bootstrap/eks-workers-nodes.j2.tf b/lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-nodes.j2.tf rename to lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf diff --git a/lib/aws/bootstrap/eks-workers-sec-group.tf b/lib/aws/bootstrap-eks/eks-workers-sec-group.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-sec-group.tf rename to lib/aws/bootstrap-eks/eks-workers-sec-group.tf diff --git a/lib/aws/bootstrap-eks/elasticcache.tf b/lib/aws/bootstrap-eks/elasticcache.tf new file mode 100644 index 00000000..44073c63 --- /dev/null +++ b/lib/aws/bootstrap-eks/elasticcache.tf @@ -0,0 +1,80 @@ +locals { + tags_elasticache = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "Elasticache" + } + ) +} + +# Network + +resource "aws_subnet" "elasticache_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticache_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticache_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticache_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_route_table_association" "elasticache_cluster_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_b" { + count = 
length(var.elasticache_subnets_zone_b) + + subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_elasticache_subnet_group" "elasticache" { + description = "Elasticache linked to ${var.kubernetes_cluster_id}" + # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly + name = "elasticache-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "elasticache_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow Redis incoming access from anywhere" + from_port = 6379 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 6379 + type = "ingress" +} diff --git a/lib/aws/bootstrap-eks/elasticsearch.tf b/lib/aws/bootstrap-eks/elasticsearch.tf new file mode 100644 index 00000000..f5e873dd --- /dev/null +++ b/lib/aws/bootstrap-eks/elasticsearch.tf @@ -0,0 +1,79 @@ +locals { + tags_elasticsearch = merge( + local.tags_eks, + { + "Service" = "Elasticsearch" + } + ) +} + +# Network + +resource "aws_subnet" "elasticsearch_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticsearch_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticsearch_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + 
tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticsearch_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_security_group" "elasticsearch" { + name = "elasticsearch-${var.kubernetes_cluster_id}" + description = "Elasticsearch security group" + vpc_id = aws_vpc.eks.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + + cidr_blocks = [ + aws_vpc.eks.cidr_block + ] + } + + tags = local.tags_elasticsearch +} diff --git a/lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf b/lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf similarity index 100% rename from lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf rename to lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf diff --git a/lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf b/lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf rename to lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf diff --git a/lib/aws/bootstrap/helm-grafana.j2.tf 
b/lib/aws/bootstrap-eks/helm-grafana.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-grafana.j2.tf rename to lib/aws/bootstrap-eks/helm-grafana.j2.tf diff --git a/lib/aws/bootstrap/helm-loki.j2.tf b/lib/aws/bootstrap-eks/helm-loki.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-loki.j2.tf rename to lib/aws/bootstrap-eks/helm-loki.j2.tf diff --git a/lib/aws/bootstrap/helm-nginx-ingress.tf b/lib/aws/bootstrap-eks/helm-nginx-ingress.tf similarity index 100% rename from lib/aws/bootstrap/helm-nginx-ingress.tf rename to lib/aws/bootstrap-eks/helm-nginx-ingress.tf diff --git a/lib/aws/bootstrap/helper.j2.sh b/lib/aws/bootstrap-eks/helper.j2.sh similarity index 100% rename from lib/aws/bootstrap/helper.j2.sh rename to lib/aws/bootstrap-eks/helper.j2.sh diff --git a/lib/aws/bootstrap/qovery-tf-config.j2.tf b/lib/aws/bootstrap-eks/qovery-tf-config.j2.tf similarity index 100% rename from lib/aws/bootstrap/qovery-tf-config.j2.tf rename to lib/aws/bootstrap-eks/qovery-tf-config.j2.tf diff --git a/lib/aws/bootstrap-eks/qovery-vault.j2.tf b/lib/aws/bootstrap-eks/qovery-vault.j2.tf new file mode 100644 index 00000000..b12afa38 --- /dev/null +++ b/lib/aws/bootstrap-eks/qovery-vault.j2.tf @@ -0,0 +1,29 @@ +locals { + kubeconfig_base64 = base64encode(local.kubeconfig) +} +// do not run for tests clusters to avoid uncleaned info. 
+// do not try to use count into resource, it will fails trying to connect to vault +{% if vault_auth_method != "none" and not test_cluster %} +resource "vault_generic_secret" "cluster-access" { + path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" + + data_json = <, + pub ec2_zone_b_subnet_blocks: Vec, + pub ec2_zone_c_subnet_blocks: Vec, pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, pub eks_zone_c_subnet_blocks: Vec, @@ -154,7 +156,7 @@ impl EKS { logger: Box, ) -> Result { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + let template_directory = format!("{}/aws/bootstrap-eks", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; @@ -796,7 +798,7 @@ impl EC2 { logger: Box, ) -> Result { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + let template_directory = format!("{}/aws/bootstrap-ec2", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; let s3 = s3(&context, ®ion, &**cloud_provider); @@ -1267,6 +1269,30 @@ fn tera_context( VpcQoveryNetworkMode::WithoutNatGateways => {} }; + let mut ec2_zone_a_subnet_blocks_private = format_ips(&options.ec2_zone_a_subnet_blocks); + let mut ec2_zone_b_subnet_blocks_private = format_ips(&options.ec2_zone_b_subnet_blocks); + let mut ec2_zone_c_subnet_blocks_private = format_ips(&options.ec2_zone_c_subnet_blocks); + + match options.vpc_qovery_network_mode { + VpcQoveryNetworkMode::WithNatGateways => { + let max_subnet_zone_a = check_odd_subnets(event_details.clone(), "a", &ec2_zone_a_subnet_blocks_private)?; + let max_subnet_zone_b = check_odd_subnets(event_details.clone(), "b", &ec2_zone_b_subnet_blocks_private)?; + let max_subnet_zone_c = check_odd_subnets(event_details.clone(), "c", 
&ec2_zone_c_subnet_blocks_private)?; + + let ec2_zone_a_subnet_blocks_public: Vec = + ec2_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); + let ec2_zone_b_subnet_blocks_public: Vec = + ec2_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); + let ec2_zone_c_subnet_blocks_public: Vec = + ec2_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); + + context.insert("ec2_zone_a_subnet_blocks_public", &ec2_zone_a_subnet_blocks_public); + context.insert("ec2_zone_b_subnet_blocks_public", &ec2_zone_b_subnet_blocks_public); + context.insert("ec2_zone_c_subnet_blocks_public", &ec2_zone_c_subnet_blocks_public); + } + VpcQoveryNetworkMode::WithoutNatGateways => {} + }; + context.insert("vpc_qovery_network_mode", &options.vpc_qovery_network_mode.to_string()); let rds_zone_a_subnet_blocks = format_ips(&options.rds_zone_a_subnet_blocks); @@ -1434,7 +1460,10 @@ fn tera_context( context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); - context.insert("eks_worker_nodes", &node_groups); // FIXME + context.insert("eks_worker_nodes", &node_groups); + context.insert("ec2_zone_a_subnet_blocks_private", &ec2_zone_a_subnet_blocks_private); + context.insert("ec2_zone_b_subnet_blocks_private", &ec2_zone_b_subnet_blocks_private); + context.insert("ec2_zone_c_subnet_blocks_private", &ec2_zone_c_subnet_blocks_private); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private); context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); @@ -1792,8 +1821,8 @@ fn pause( )); } - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
- // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap-{type}/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap-{type}/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { @@ -1916,6 +1945,7 @@ fn pause( kubernetes .logger() .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); + Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 8a692f45..c6c5c4b1 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -154,6 +154,9 @@ impl Cluster for AWS { fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { Options { + ec2_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], + ec2_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], + ec2_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From 0f193fbde590044c723230c33bbae0b1a584740b Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 22 Apr 2022 16:41:46 +0200 Subject: [PATCH 054/122] feat: update tf config for aws ec2 --- lib/aws/bootstrap-ec2/documentdb.tf | 18 +-- lib/aws/bootstrap-ec2/ec2-sec-group.tf | 27 +++++ ...-vpc-common.j2.tf => ec2-vpc-common.j2.tf} | 25 
++-- lib/aws/bootstrap-ec2/ec2-vpc.j2.tf | 72 +++++++++++ lib/aws/bootstrap-ec2/ec2.j2.tf | 61 ++++++++++ .../eks-vpc-without-nat-gateways.j2.tf | 75 ------------ lib/aws/bootstrap-ec2/elasticcache.tf | 18 +-- lib/aws/bootstrap-ec2/elasticsearch.tf | 18 +-- lib/aws/bootstrap-ec2/qovery-vault.j2.tf | 2 +- lib/aws/bootstrap-ec2/rds.tf | 20 ++-- lib/aws/bootstrap-ec2/s3-qovery-buckets.tf | 4 +- lib/aws/bootstrap-ec2/tf-default-vars.j2.tf | 112 +++++++++--------- 12 files changed, 263 insertions(+), 189 deletions(-) create mode 100644 lib/aws/bootstrap-ec2/ec2-sec-group.tf rename lib/aws/bootstrap-ec2/{eks-vpc-common.j2.tf => ec2-vpc-common.j2.tf} (55%) create mode 100644 lib/aws/bootstrap-ec2/ec2-vpc.j2.tf create mode 100644 lib/aws/bootstrap-ec2/ec2.j2.tf delete mode 100644 lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-ec2/documentdb.tf b/lib/aws/bootstrap-ec2/documentdb.tf index ea04fec0..04ca6934 100644 --- a/lib/aws/bootstrap-ec2/documentdb.tf +++ b/lib/aws/bootstrap-ec2/documentdb.tf @@ -1,6 +1,6 @@ locals { tags_documentdb = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "DocumentDB" } @@ -14,7 +14,7 @@ resource "aws_subnet" "documentdb_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.documentdb_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -24,7 +24,7 @@ resource "aws_subnet" "documentdb_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.documentdb_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -34,7 +34,7 @@ resource "aws_subnet" "documentdb_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.documentdb_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -43,26 +43,26 @@ resource 
"aws_route_table_association" "documentdb_cluster_zone_a" { count = length(var.documentdb_subnets_zone_a) subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "documentdb_cluster_zone_b" { count = length(var.documentdb_subnets_zone_b) subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "documentdb_cluster_zone_c" { count = length(var.documentdb_subnets_zone_c) subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_docdb_subnet_group" "documentdb" { description = "DocumentDB linked to ${var.kubernetes_cluster_id}" - name = "documentdb-${aws_vpc.eks.id}" + name = "documentdb-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) tags = local.tags_documentdb @@ -75,7 +75,7 @@ resource "aws_security_group_rule" "documentdb_remote_access" { description = "Allow DocumentDB incoming access from anywhere" from_port = 27017 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 27017 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws/bootstrap-ec2/ec2-sec-group.tf new file mode 100644 index 00000000..02cd4bfc --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2-sec-group.tf @@ -0,0 +1,27 @@ +resource "aws_security_group" "ec2_cluster" { + name = "qovery-ec2-${var.kubernetes_cluster_id}" + description = "Cluster communication with worker nodes" + vpc_id = aws_vpc.ec2.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = 
["0.0.0.0/0"] + } + + tags = local.tags_ec2 +} + +# OPTIONAL: Allow inbound traffic from your local workstation external IP +# to the Kubernetes. You will need to replace A.B.C.D below with +# your real IP. Services like icanhazip.com can help you find this. +resource "aws_security_group_rule" "cluster_ingress_workstation_https" { + cidr_blocks = var.ec2_access_cidr_blocks + description = "Allow workstation to communicate with the cluster API Server" + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.ec2_cluster.id + to_port = 443 + type = "ingress" +} diff --git a/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf b/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf similarity index 55% rename from lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf rename to lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf index 63b91880..cc8682f3 100644 --- a/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf +++ b/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf @@ -1,42 +1,35 @@ data "aws_availability_zones" "available" {} locals { - tags_eks_vpc = merge( + tags_ec2_vpc = merge( local.tags_common, { - Name = "qovery-eks-workers", + Name = "qovery-ec2-workers", "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", "kubernetes.io/role/elb" = 1, {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} } ) - tags_eks_vpc_public = merge( - local.tags_eks_vpc, + tags_ec2_vpc_public = merge( + local.tags_ec2_vpc, { "Public" = "true" } ) - - tags_eks_vpc_private = merge( - local.tags_eks, - { - "Public" = "false" - } - ) } # VPC -resource "aws_vpc" "eks" { +resource "aws_vpc" "ec2" { cidr_block = var.vpc_cidr_block enable_dns_hostnames = true - tags = local.tags_eks_vpc + tags = local.tags_ec2_vpc } # Internet gateway -resource "aws_internet_gateway" "eks_cluster" { - vpc_id = aws_vpc.eks.id +resource "aws_internet_gateway" "ec2_instance" { + vpc_id = aws_vpc.ec2.id - tags = local.tags_eks_vpc + tags = local.tags_ec2_vpc } \ No newline at 
end of file diff --git a/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf b/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf new file mode 100644 index 00000000..27915b66 --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf @@ -0,0 +1,72 @@ +# Public subnets +resource "aws_subnet" "ec2_zone_a" { + count = length(var.ec2_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.ec2_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_subnet" "ec2_zone_b" { + count = length(var.ec2_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.ec2_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_subnet" "ec2_zone_c" { + count = length(var.ec2_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.ec2_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_route_table" "ec2_cluster" { + vpc_id = aws_vpc.ec2.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.ec2_instance.id + } + + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_ec2_vpc +} + +resource "aws_route_table_association" "ec2_cluster_zone_a" { + count = length(var.ec2_subnets_zone_a_private) + + subnet_id = aws_subnet.ec2_zone_a.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} + +resource "aws_route_table_association" "ec2_cluster_zone_b" { + count = length(var.ec2_subnets_zone_b_private) + + subnet_id = aws_subnet.ec2_zone_b.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} + +resource "aws_route_table_association" "ec2_cluster_zone_c" { + count = 
length(var.ec2_subnets_zone_c_private) + + subnet_id = aws_subnet.ec2_zone_c.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws/bootstrap-ec2/ec2.j2.tf new file mode 100644 index 00000000..e85ed5f9 --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2.j2.tf @@ -0,0 +1,61 @@ +data "aws_ami" "debian" { + most_recent = true + + filter { + name = "name" + values = [var.ec2_image_info.name] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + # to get owner id: + # aws ec2 describe-images --image-ids --region us-west-2 | jq -r '.Images[0].OwnerId' + owners = [var.ec2_image_info.owners] +} + +resource "aws_instance" "web" { + ami = data.aws_ami.debian.id + instance_type = var.ec2_instance.instance_type + + # disk + root_block_device { + volume_size = "30" # GiB + volume_type = "gp2" + encrypted = true + } + + # network + associate_public_ip_address = true + + # security + #vpc_security_group_ids = [aws_vpc.ec2.*.id] + + user_data = local.bootstrap + + tags = { + Name = "HelloWorld" + } +} + +locals { + bootstrap = <> /etc/profile + +while [ ! 
-f /etc/rancher/k3s/k3s.yaml ] ; do + echo "kubeconfig is not yet present, sleeping" + sleep 1 +done +s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put /etc/rancher/k3s/k3s.yaml s3://${var.s3_bucket_kubeconfig}/${var.kubernetes_cluster_id}.yaml +BOOTSTRAP +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf deleted file mode 100644 index d0174308..00000000 --- a/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf +++ /dev/null @@ -1,75 +0,0 @@ -{% if vpc_qovery_network_mode == "WithoutNatGateways" %} -# Public subnets -resource "aws_subnet" "eks_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.eks_subnets_zone_a_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.eks_subnets_zone_b_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.eks_subnets_zone_c_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_route_table" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.eks_cluster.id - } - - // todo(pmavro): add tests for it when it will be available in the SDK - {% for route in vpc_custom_routing_table %} - route { - cidr_block = "{{ route.destination }}" - gateway_id = "{{ route.target }}" - } - {% endfor %} - - tags = local.tags_eks_vpc -} - 
-resource "aws_route_table_association" "eks_cluster_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - subnet_id = aws_subnet.eks_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - subnet_id = aws_subnet.eks_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - subnet_id = aws_subnet.eks_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} -{% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/elasticcache.tf b/lib/aws/bootstrap-ec2/elasticcache.tf index 44073c63..b19c3494 100644 --- a/lib/aws/bootstrap-ec2/elasticcache.tf +++ b/lib/aws/bootstrap-ec2/elasticcache.tf @@ -1,6 +1,6 @@ locals { tags_elasticache = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "Elasticache" } @@ -14,7 +14,7 @@ resource "aws_subnet" "elasticache_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.elasticache_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -24,7 +24,7 @@ resource "aws_subnet" "elasticache_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.elasticache_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -34,7 +34,7 @@ resource "aws_subnet" "elasticache_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.elasticache_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -43,27 +43,27 @@ resource "aws_route_table_association" "elasticache_cluster_zone_a" { count = length(var.elasticache_subnets_zone_a) subnet_id = 
aws_subnet.elasticache_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticache_cluster_zone_b" { count = length(var.elasticache_subnets_zone_b) subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticache_cluster_zone_c" { count = length(var.elasticache_subnets_zone_c) subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_elasticache_subnet_group" "elasticache" { description = "Elasticache linked to ${var.kubernetes_cluster_id}" # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly - name = "elasticache-${aws_vpc.eks.id}" + name = "elasticache-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) } @@ -74,7 +74,7 @@ resource "aws_security_group_rule" "elasticache_remote_access" { description = "Allow Redis incoming access from anywhere" from_port = 6379 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 6379 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/elasticsearch.tf b/lib/aws/bootstrap-ec2/elasticsearch.tf index f5e873dd..bbd3a685 100644 --- a/lib/aws/bootstrap-ec2/elasticsearch.tf +++ b/lib/aws/bootstrap-ec2/elasticsearch.tf @@ -1,6 +1,6 @@ locals { tags_elasticsearch = merge( - local.tags_eks, + local.tags_ec2, { "Service" = "Elasticsearch" } @@ -14,7 +14,7 @@ resource "aws_subnet" "elasticsearch_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.elasticsearch_subnets_zone_a[count.index] - vpc_id 
= aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -24,7 +24,7 @@ resource "aws_subnet" "elasticsearch_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.elasticsearch_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -34,7 +34,7 @@ resource "aws_subnet" "elasticsearch_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.elasticsearch_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -43,27 +43,27 @@ resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { count = length(var.elasticsearch_subnets_zone_a) subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { count = length(var.elasticsearch_subnets_zone_b) subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { count = length(var.elasticsearch_subnets_zone_c) subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_security_group" "elasticsearch" { name = "elasticsearch-${var.kubernetes_cluster_id}" description = "Elasticsearch security group" - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id ingress { from_port = 443 @@ -71,7 +71,7 @@ resource "aws_security_group" "elasticsearch" { protocol = "tcp" cidr_blocks = [ - aws_vpc.eks.cidr_block + aws_vpc.ec2.cidr_block ] } diff --git a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf index b12afa38..9288c182 100644 --- 
a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf +++ b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf @@ -23,7 +23,7 @@ resource "vault_generic_secret" "cluster-access" { EOT depends_on = [ - aws_eks_cluster.eks_cluster, + aws_ec2_cluster.ec2_cluster, ] } {% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/rds.tf b/lib/aws/bootstrap-ec2/rds.tf index 9207b0ca..dbc76ad3 100644 --- a/lib/aws/bootstrap-ec2/rds.tf +++ b/lib/aws/bootstrap-ec2/rds.tf @@ -15,7 +15,7 @@ data "aws_iam_policy_document" "rds_enhanced_monitoring" { locals { tags_rds = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "RDS" } @@ -28,7 +28,7 @@ resource "aws_subnet" "rds_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.rds_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -38,7 +38,7 @@ resource "aws_subnet" "rds_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.rds_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -48,7 +48,7 @@ resource "aws_subnet" "rds_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.rds_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -57,26 +57,26 @@ resource "aws_route_table_association" "rds_cluster_zone_a" { count = length(var.rds_subnets_zone_a) subnet_id = aws_subnet.rds_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "rds_cluster_zone_b" { count = length(var.rds_subnets_zone_b) subnet_id = aws_subnet.rds_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "rds_cluster_zone_c" { count = length(var.rds_subnets_zone_c) subnet_id = 
aws_subnet.rds_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_db_subnet_group" "rds" { description = "RDS linked to ${var.kubernetes_cluster_id}" - name = aws_vpc.eks.id + name = aws_vpc.ec2.id subnet_ids = flatten([aws_subnet.rds_zone_a.*.id, aws_subnet.rds_zone_b.*.id, aws_subnet.rds_zone_c.*.id]) tags = local.tags_rds @@ -102,7 +102,7 @@ resource "aws_security_group_rule" "postgres_remote_access" { description = "Allow RDS PostgreSQL incoming access from anywhere" from_port = 5432 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 5432 type = "ingress" } @@ -112,7 +112,7 @@ resource "aws_security_group_rule" "mysql_remote_access" { description = "Allow RDS MySQL incoming access from anywhere" from_port = 3306 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 3306 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf index 34373142..b5680921 100644 --- a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf +++ b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf @@ -8,7 +8,7 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" { } tags = merge( - local.tags_eks, + local.tags_ec2, { "Name" = "Kubernetes kubeconfig" } @@ -27,7 +27,7 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" { resource "aws_kms_key" "s3_kubeconfig_kms_encryption" { description = "s3 kubeconfig encryption" tags = merge( - local.tags_eks, + local.tags_ec2, { "Name" = "Kubeconfig Encryption" } diff --git a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf index b9517338..68c86897 100644 --- a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf +++ b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf @@ -56,57 +56,84 @@ variable 
"vpc_cidr_block" { type = string } -# Kubernetes +# ec2 -variable "eks_subnets_zone_a_private" { - description = "EKS private subnets Zone A" - default = {{ eks_zone_a_subnet_blocks_private }} +variable "ec2_image_info" { + description = "EC2 image information" + default = { + "name" = "debian-10-amd64*" + "owners" = "136693071363" + } + type = map(string) +} + +variable "ec2_instance" { + description = "EC2 instance configuration" + default = { + "instance_type" = "t3.micro" + } + type = map(string) +} + +variable "k3s_config" { + description = "K3s configuration" + default = { + "version" = "v1.20.15+k3s1" + "channel" = "latest" + "exec" = "--disable=traefik" + } + type = map(string) +} + +variable "ec2_subnets_zone_a_private" { + description = "EC2 private subnets Zone A" + default = {{ ec2_zone_a_subnet_blocks_private }} type = list(string) } -variable "eks_subnets_zone_b_private" { - description = "EKS private subnets Zone B" - default = {{ eks_zone_b_subnet_blocks_private }} +variable "ec2_subnets_zone_b_private" { + description = "EC2 private subnets Zone B" + default = {{ ec2_zone_b_subnet_blocks_private }} type = list(string) } -variable "eks_subnets_zone_c_private" { - description = "EKS private subnets Zone C" - default = {{ eks_zone_c_subnet_blocks_private }} +variable "ec2_subnets_zone_c_private" { + description = "EC2 private subnets Zone C" + default = {{ ec2_zone_c_subnet_blocks_private }} type = list(string) } {% if vpc_qovery_network_mode == "WithNatGateways" %} -variable "eks_subnets_zone_a_public" { - description = "EKS public subnets Zone A" - default = {{ eks_zone_a_subnet_blocks_public }} +variable "ec2_subnets_zone_a_public" { + description = "EC2 public subnets Zone A" + default = {{ ec2_zone_a_subnet_blocks_public }} type = list(string) } -variable "eks_subnets_zone_b_public" { - description = "EKS public subnets Zone B" - default = {{ eks_zone_b_subnet_blocks_public }} +variable "ec2_subnets_zone_b_public" { + description = "EC2 public 
subnets Zone B" + default = {{ ec2_zone_b_subnet_blocks_public }} type = list(string) } -variable "eks_subnets_zone_c_public" { - description = "EKS public subnets Zone C" - default = {{ eks_zone_c_subnet_blocks_public }} +variable "ec2_subnets_zone_c_public" { + description = "EC2 public subnets Zone C" + default = {{ ec2_zone_c_subnet_blocks_public }} type = list(string) } {% endif %} -variable "eks_cidr_subnet" { - description = "EKS CIDR (x.x.x.x/CIDR)" - default = {{ eks_cidr_subnet }} +variable "ec2_cidr_subnet" { + description = "EC2 CIDR (x.x.x.x/CIDR)" + default = {{ ec2_cidr_subnet }} type = number } -variable "eks_k8s_versions" { +variable "ec2_k8s_versions" { description = "Kubernetes version" default = { - "masters": "{{ eks_masters_version }}", - "workers": "{{ eks_workers_version }}", + "masters": "{{ ec2_masters_version }}", + "workers": "{{ ec2_workers_version }}", } type = map(string) } @@ -129,18 +156,12 @@ variable "kubernetes_cluster_name" { type = string } -variable "eks_access_cidr_blocks" { +variable "ec2_access_cidr_blocks" { description = "Kubernetes CIDR Block" - default = {{ eks_access_cidr_blocks }} + default = {{ ec2_access_cidr_blocks }} type = list(string) } -variable "eks_cloudwatch_log_group" { - description = "AWS cloudwatch log group for EKS" - default = "qovery-{{ eks_cloudwatch_log_group }}" - type = string -} - # S3 bucket name variable "s3_bucket_kubeconfig" { @@ -149,23 +170,6 @@ variable "s3_bucket_kubeconfig" { type = string } -# Engine info - -variable "qovery_engine_info" { - description = "Qovery engine info" - default = { - "token" = "{{ engine_version_controller_token }}" - "api_fqdn" = "{{ qovery_api_url }}" - } - type = map(string) -} - -variable "qovery_engine_replicas" { - description = "This variable is used to get random ID generated for the engine" - default = "2" - type = number -} - # Agent info variable "qovery_agent_info" { @@ -287,14 +291,6 @@ variable "elasticsearch_cidr_subnet" { type = number } -# Helm 
alert manager discord - -variable "discord_api_key" { - description = "discord url with token for used for alerting" - default = "{{ discord_api_key }}" - type = string -} - # Qovery features variable "log_history_enabled" { From d18d8e745ae5b5acd09c2a667da2b014e03cb1ad Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 22 Apr 2022 19:58:06 +0200 Subject: [PATCH 055/122] feat: add security rules to aws EC2 --- lib/aws/bootstrap-ec2/ec2-sec-group.tf | 19 +++++++++++++------ lib/aws/bootstrap-ec2/ec2.j2.tf | 4 +++- lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws/bootstrap-ec2/ec2-sec-group.tf index 02cd4bfc..a82bd0e9 100644 --- a/lib/aws/bootstrap-ec2/ec2-sec-group.tf +++ b/lib/aws/bootstrap-ec2/ec2-sec-group.tf @@ -13,15 +13,22 @@ resource "aws_security_group" "ec2_cluster" { tags = local.tags_ec2 } -# OPTIONAL: Allow inbound traffic from your local workstation external IP -# to the Kubernetes. You will need to replace A.B.C.D below with -# your real IP. Services like icanhazip.com can help you find this. 
-resource "aws_security_group_rule" "cluster_ingress_workstation_https" { - cidr_blocks = var.ec2_access_cidr_blocks - description = "Allow workstation to communicate with the cluster API Server" +resource "aws_security_group_rule" "https" { + cidr_blocks = "0.0.0.0/0" + description = "HTTPS connectivity" from_port = 443 protocol = "tcp" security_group_id = aws_security_group.ec2_cluster.id to_port = 443 type = "ingress" } + +resource "aws_security_group_rule" "ssh" { + cidr_blocks = "0.0.0.0/0" + description = "SSH remote access" + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.ec2_cluster.id + to_port = 22 + type = "ssh" +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws/bootstrap-ec2/ec2.j2.tf index e85ed5f9..2a9bb030 100644 --- a/lib/aws/bootstrap-ec2/ec2.j2.tf +++ b/lib/aws/bootstrap-ec2/ec2.j2.tf @@ -31,7 +31,9 @@ resource "aws_instance" "web" { associate_public_ip_address = true # security - #vpc_security_group_ids = [aws_vpc.ec2.*.id] + vpc_security_group_ids = [aws_vpc.ec2.id] + subnet_id = aws_subnet.ec2_zone_a.id + security_groups = [aws_security_group.ec2_cluster.id] user_data = local.bootstrap diff --git a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf index c4612160..e5235b07 100644 --- a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf +++ b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.66.0" + version = "~> 4.11.0" } external = { source = "hashicorp/external" From 5e0739586bf6239a1513d6f96430fd9936ef91a4 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 21:06:25 +0200 Subject: [PATCH 056/122] wip: refactor according to Benjamin C. 
feedback --- .../bootstrap}/backend.j2.tf | 0 .../bootstrap}/documentdb.tf | 0 .../bootstrap}/ec2-sec-group.tf | 0 .../bootstrap}/ec2-vpc-common.j2.tf | 0 .../bootstrap}/ec2-vpc.j2.tf | 0 .../bootstrap}/ec2.j2.tf | 0 .../bootstrap}/elasticcache.tf | 0 .../bootstrap}/elasticsearch.tf | 0 .../bootstrap}/qovery-vault.j2.tf | 0 .../bootstrap}/rds.tf | 0 .../bootstrap}/s3-qovery-buckets.tf | 0 .../bootstrap}/tf-default-vars.j2.tf | 0 .../bootstrap}/tf-providers-aws.j2.tf | 0 .../chart_values/mongodb/q-values.j2.yaml | 546 +++++++++ .../chart_values/mysql/q-values.j2.yaml | 603 ++++++++++ .../nginx-ingress/nginx-ingress.j2.yaml | 572 +++++++++ .../chart_values/postgresql/q-values.j2.yaml | 568 +++++++++ .../chart_values/redis/q-values.j2.yaml | 788 ++++++++++++ .../charts/q-application}/.helmignore | 0 .../charts/q-application/Chart.j2.yaml | 7 + .../templates/deployment.j2.yaml | 92 ++ .../templates/horizontal_autoscaler.j2.yaml | 19 + .../templates/networkpolicies.j2.yaml | 95 ++ .../q-application/templates/pdb.j2.yaml | 21 + .../q-application/templates/secret.j2.yaml | 17 + .../q-application/templates/service.j2.yaml | 26 + .../templates/statefulset.j2.yaml | 132 +++ .../charts/q-application/values.j2.yaml | 2 + .../charts/q-ingress-tls}/.helmignore | 0 .../charts/q-ingress-tls/Chart.j2.yaml | 6 + .../templates/cert-issuer.j2.yaml | 20 + .../templates/ingress-qovery.j2.yaml | 69 ++ .../charts/q-ingress-tls/values.j2.yaml | 2 + lib/aws-ec2/services/common/backend.j2.tf | 21 + .../services/common/common-variables.j2.tf | 167 +++ lib/aws-ec2/services/common/providers.j2.tf | 52 + lib/aws-ec2/services/mongodb/local-vars.j2.tf | 6 + lib/aws-ec2/services/mongodb/main.j2.tf | 114 ++ lib/aws-ec2/services/mongodb/variables.j2.tf | 43 + lib/aws-ec2/services/mysql/local-vars.j2.tf | 6 + lib/aws-ec2/services/mysql/main.j2.tf | 132 +++ lib/aws-ec2/services/mysql/variables.j2.tf | 67 ++ .../services/postgresql/local-vars.j2.tf | 6 + lib/aws-ec2/services/postgresql/main.j2.tf | 121 
++ .../services/postgresql/variables.j2.tf | 61 + lib/aws-ec2/services/redis/local-vars.j2.tf | 7 + lib/aws-ec2/services/redis/main.j2.tf | 114 ++ lib/aws-ec2/services/redis/variables.j2.tf | 37 + .../{bootstrap-eks => bootstrap}/README.md | 0 .../backend.j2.tf | 0 .../chart_values/external-dns.j2.yaml | 0 .../chart_values/grafana.j2.yaml | 0 .../chart_values/kube-prometheus-stack.yaml | 0 .../chart_values/loki.yaml | 0 .../chart_values/metrics-server.yaml | 0 .../chart_values/nginx-ingress.yaml | 0 .../chart_values/pleco.yaml | 0 .../charts/aws-calico/.helmignore | 0 .../charts/aws-calico/Chart.yaml | 0 .../charts/aws-calico/README.md | 0 .../charts/aws-calico/crds/crds.yaml | 0 .../charts/aws-calico/templates/_helpers.tpl | 0 .../aws-calico/templates/config-map.yaml | 0 .../aws-calico/templates/daemon-set.yaml | 0 .../aws-calico/templates/deployment.yaml | 0 .../templates/pod-disruption-budget.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../charts/aws-calico/templates/rbac.yaml | 0 .../templates/service-accounts.yaml | 0 .../charts/aws-calico/templates/service.yaml | 0 .../charts/aws-calico/values.yaml | 0 .../charts/aws-limits-exporter/.helmignore | 0 .../charts/aws-limits-exporter/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../templates/secrets.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../charts/aws-limits-exporter/values.yaml | 0 .../aws-node-termination-handler}/.helmignore | 0 .../aws-node-termination-handler/Chart.yaml | 0 .../aws-node-termination-handler/README.md | 0 .../templates/_helpers.tpl | 0 .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/daemonset.yaml | 0 .../templates/psp.yaml | 0 .../templates/serviceaccount.yaml | 0 .../aws-node-termination-handler/values.yaml | 0 .../bootstrap/charts/aws-ui-view/.helmignore | 22 + .../charts/aws-ui-view/Chart.yaml | 0 
.../charts/aws-ui-view/templates/_helpers.tpl | 0 .../aws-ui-view/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../charts/aws-ui-view/values.yaml | 0 .../bootstrap/charts/aws-vpc-cni/.helmignore | 22 + .../charts/aws-vpc-cni/Chart.yaml | 0 .../charts/aws-vpc-cni/README.md | 0 .../charts/aws-vpc-cni/templates/_helpers.tpl | 0 .../aws-vpc-cni/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../aws-vpc-cni/templates/configmap.yaml | 0 .../templates/customresourcedefinition.yaml | 0 .../aws-vpc-cni/templates/daemonset.yaml | 0 .../aws-vpc-cni/templates/eniconfig.yaml | 0 .../aws-vpc-cni/templates/serviceaccount.yaml | 0 .../charts/aws-vpc-cni/values.yaml | 0 .../charts/coredns-config/.helmignore | 0 .../charts/coredns-config/Chart.yaml | 0 .../coredns-config/templates/_helpers.tpl | 0 .../coredns-config/templates/configmap.yml | 0 .../charts/coredns-config/values.yaml | 0 .../charts/iam-eks-user-mapper/.helmignore | 0 .../charts/iam-eks-user-mapper/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../iam-eks-user-mapper/templates/rbac.yaml | 0 .../iam-eks-user-mapper/templates/secret.yaml | 0 .../templates/serviceaccount.yaml | 0 .../charts/iam-eks-user-mapper/values.yaml | 0 .../charts/q-storageclass/.helmignore | 0 .../charts/q-storageclass/Chart.yaml | 0 .../q-storageclass/templates/_helpers.tpl | 0 .../templates/storageclass.yaml | 0 .../charts/q-storageclass/values.yaml | 0 .../documentdb.tf | 0 .../eks-ebs-csi-driver.tf | 0 .../eks-gen-kubectl-config.j2.tf | 0 .../eks-master-cluster.j2.tf | 0 .../eks-master-iam.tf | 0 .../eks-master-sec-group.tf | 0 .../eks-s3-kubeconfig-store.tf | 0 .../eks-vpc-common.j2.tf | 0 .../eks-vpc-with-nat-gateways.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 0 .../eks-workers-iam.tf | 0 .../eks-workers-nodes.j2.tf | 0 .../eks-workers-sec-group.tf | 0 .../elasticcache.tf | 0 .../elasticsearch.tf | 0 .../helm-aws-iam-eks-user-mapper.tf | 0 
.../helm-cluster-autoscaler.j2.tf | 0 .../helm-grafana.j2.tf | 0 .../helm-loki.j2.tf | 0 .../helm-nginx-ingress.tf | 0 .../{bootstrap-eks => bootstrap}/helper.j2.sh | 0 .../qovery-tf-config.j2.tf | 0 .../qovery-vault.j2.tf | 0 lib/aws/{bootstrap-eks => bootstrap}/rds.tf | 0 .../s3-qovery-buckets.tf | 0 .../tf-default-vars.j2.tf | 0 .../tf-providers-aws.j2.tf | 0 lib/edge/aws/backend.j2.tf | 10 - lib/edge/aws/documentdb.tf | 81 -- lib/edge/aws/eks-vpc-common.j2.tf | 42 - .../aws/eks-vpc-without-nat-gateways.j2.tf | 75 -- lib/edge/aws/elasticcache.tf | 80 -- lib/edge/aws/elasticsearch.tf | 79 -- lib/edge/aws/qovery-vault.j2.tf | 29 - lib/edge/aws/rds.tf | 118 -- lib/edge/aws/s3-qovery-buckets.tf | 44 - lib/edge/aws/tf-default-vars.j2.tf | 319 ----- lib/edge/aws/tf-providers-aws.j2.tf | 60 - src/cloud_provider/aws/kubernetes/ec2.rs | 390 ++++++ src/cloud_provider/aws/kubernetes/eks.rs | 670 +++++++++++ src/cloud_provider/aws/kubernetes/mod.rs | 1054 +---------------- test_utilities/src/common.rs | 4 +- 168 files changed, 5657 insertions(+), 1981 deletions(-) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/backend.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/documentdb.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-sec-group.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-vpc-common.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-vpc.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/elasticcache.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/elasticsearch.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/qovery-vault.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/rds.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/s3-qovery-buckets.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/tf-default-vars.j2.tf (100%) rename 
lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/tf-providers-aws.j2.tf (100%) create mode 100644 lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/mysql/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml create mode 100644 lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/redis/q-values.j2.yaml rename lib/{aws/bootstrap-eks/charts/aws-node-termination-handler => aws-ec2/charts/q-application}/.helmignore (100%) create mode 100644 lib/aws-ec2/charts/q-application/Chart.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/secret.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/service.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/values.j2.yaml rename lib/{aws/bootstrap-eks/charts/aws-ui-view => aws-ec2/charts/q-ingress-tls}/.helmignore (100%) create mode 100644 lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml create mode 100644 lib/aws-ec2/services/common/backend.j2.tf create mode 100644 lib/aws-ec2/services/common/common-variables.j2.tf create mode 100644 lib/aws-ec2/services/common/providers.j2.tf create mode 100644 lib/aws-ec2/services/mongodb/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/mongodb/main.j2.tf create mode 100644 
lib/aws-ec2/services/mongodb/variables.j2.tf create mode 100644 lib/aws-ec2/services/mysql/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/mysql/main.j2.tf create mode 100644 lib/aws-ec2/services/mysql/variables.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/main.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/variables.j2.tf create mode 100644 lib/aws-ec2/services/redis/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/redis/main.j2.tf create mode 100644 lib/aws-ec2/services/redis/variables.j2.tf rename lib/aws/{bootstrap-eks => bootstrap}/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/backend.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/external-dns.j2.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/grafana.j2.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/kube-prometheus-stack.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/loki.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/metrics-server.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/nginx-ingress.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/pleco.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/crds/crds.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/config-map.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/daemon-set.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/deployment.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/pod-disruption-budget.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/podsecuritypolicy.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/rbac.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/service-accounts.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/service.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/deployment.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/secrets.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/service.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/servicemonitor.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/values.yaml (100%) rename lib/aws/{bootstrap-eks/charts/aws-vpc-cni => bootstrap/charts/aws-node-termination-handler}/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/clusterrole.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/psp.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/values.yaml (100%) create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/.helmignore rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/values.yaml (100%) create mode 100644 lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/configmap.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/customresourcedefinition.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/eniconfig.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/templates/configmap.yml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/deployment.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/rbac.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/secret.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/templates/storageclass.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/documentdb.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-ebs-csi-driver.tf 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-gen-kubectl-config.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-cluster.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-iam.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-sec-group.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-s3-kubeconfig-store.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-common.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-with-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-without-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-iam.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-nodes.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-sec-group.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/elasticcache.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/elasticsearch.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-aws-iam-eks-user-mapper.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-cluster-autoscaler.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-grafana.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-loki.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-nginx-ingress.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helper.j2.sh (100%) rename lib/aws/{bootstrap-eks => bootstrap}/qovery-tf-config.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/qovery-vault.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/rds.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/s3-qovery-buckets.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/tf-default-vars.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/tf-providers-aws.j2.tf (100%) delete mode 100644 lib/edge/aws/backend.j2.tf delete mode 100644 lib/edge/aws/documentdb.tf delete mode 100644 lib/edge/aws/eks-vpc-common.j2.tf delete mode 
100644 lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf delete mode 100644 lib/edge/aws/elasticcache.tf delete mode 100644 lib/edge/aws/elasticsearch.tf delete mode 100644 lib/edge/aws/qovery-vault.j2.tf delete mode 100644 lib/edge/aws/rds.tf delete mode 100644 lib/edge/aws/s3-qovery-buckets.tf delete mode 100644 lib/edge/aws/tf-default-vars.j2.tf delete mode 100644 lib/edge/aws/tf-providers-aws.j2.tf create mode 100644 src/cloud_provider/aws/kubernetes/ec2.rs create mode 100644 src/cloud_provider/aws/kubernetes/eks.rs diff --git a/lib/aws/bootstrap-ec2/backend.j2.tf b/lib/aws-ec2/bootstrap/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/backend.j2.tf rename to lib/aws-ec2/bootstrap/backend.j2.tf diff --git a/lib/aws/bootstrap-ec2/documentdb.tf b/lib/aws-ec2/bootstrap/documentdb.tf similarity index 100% rename from lib/aws/bootstrap-ec2/documentdb.tf rename to lib/aws-ec2/bootstrap/documentdb.tf diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-sec-group.tf rename to lib/aws-ec2/bootstrap/ec2-sec-group.tf diff --git a/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf b/lib/aws-ec2/bootstrap/ec2-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf rename to lib/aws-ec2/bootstrap/ec2-vpc-common.j2.tf diff --git a/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf b/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-vpc.j2.tf rename to lib/aws-ec2/bootstrap/ec2-vpc.j2.tf diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2.j2.tf rename to lib/aws-ec2/bootstrap/ec2.j2.tf diff --git a/lib/aws/bootstrap-ec2/elasticcache.tf b/lib/aws-ec2/bootstrap/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap-ec2/elasticcache.tf rename to lib/aws-ec2/bootstrap/elasticcache.tf diff --git 
a/lib/aws/bootstrap-ec2/elasticsearch.tf b/lib/aws-ec2/bootstrap/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap-ec2/elasticsearch.tf rename to lib/aws-ec2/bootstrap/elasticsearch.tf diff --git a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf b/lib/aws-ec2/bootstrap/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/qovery-vault.j2.tf rename to lib/aws-ec2/bootstrap/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap-ec2/rds.tf b/lib/aws-ec2/bootstrap/rds.tf similarity index 100% rename from lib/aws/bootstrap-ec2/rds.tf rename to lib/aws-ec2/bootstrap/rds.tf diff --git a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf b/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap-ec2/s3-qovery-buckets.tf rename to lib/aws-ec2/bootstrap/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf b/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/tf-default-vars.j2.tf rename to lib/aws-ec2/bootstrap/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf b/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf rename to lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf diff --git a/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml b/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml new file mode 100644 index 00000000..9f001e92 --- /dev/null +++ b/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml @@ -0,0 +1,546 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass +## Override the namespace for resource deployed by the chart, but 
can itself be overridden by the local namespaceOverride +# namespaceOverride: my-global-namespace + +image: + ## Bitnami MongoDB registry + ## + registry: quay.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: true + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: +nameOverride: {{ sanitized_name }} + +## String to fully override mongodb.fullname template +## +# fullnameOverride: +fullnameOverride: {{ sanitized_name }} + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +# Add custom extra environment variables to all the MongoDB containers +# extraEnvVars: + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: + limits: {} + requests: + cpu: "{{ total_cpus }}" + memory: "{{ total_ram_in_mib }}Mi" + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +mongodbRootPassword: '{{ database_password }}' + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +mongodbUsername: '{{ database_login }}' +mongodbPassword: '{{ database_password }}' +mongodbDatabase: {{ database_db_name }} + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +mongodbDirectoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +mongodbSystemLogVerbosity: 0 +mongodbDisableSystemLog: false + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + ## Specify an explicit 
service name. + # name: svc-mongo + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + # clusterIP: None + port: {{ private_port }} + qovery_name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + # externalIPs: [] + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. 
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: [] + +## Use StatefulSet instead of Deployment when deploying standalone +useStatefulSet: true + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + enabled: true + minAvailable: + secondary: 1 + arbiter: 1 + # maxUnavailable: + # secondary: 1 + # arbiter: 1 + +# Annotations to be added to the deployment or statefulsets +annotations: {} + +# Additional labels to apply to the deployment or statefulsets +labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +# Additional pod labels to apply +podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# Define separate resources per arbiter, which are less than primary or secondary +# used only when replica set is enabled +resourcesArbiter: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Define Separate nodeSelector for secondaries +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelectorSecondary: {} + +## Define Separate nodeSelector for arbiter +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelectorArbiter: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} +# Define separate affinity for arbiter pod +affinityArbiter: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Add sidecars to the pod +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecars: [] +## Array to add extra volumes +## +extraVolumes: [] +## Array to add extra mounts (normally used with extraVolumes) +## +extraVolumeMounts: [] + +## Add sidecars to the arbiter pod +# used only when replica set is enabled +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecarsArbiter: [] +## Array 
to add extra volumes to the arbiter +# used only when replica set is enabled +## +extraVolumesArbiter: [] +## Array to add extra mounts (normally used with extraVolumes) to the arbiter +# used only when replica set is enabled +## +extraVolumeMountsArbiter: [] + +## updateStrategy for MongoDB Primary, Secondary and Arbiter statefulsets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # storageClassSecondary: "-" + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## Configure the ingress resource that allows you to access the +## MongoDB installation. 
Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: mongodb.local + path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + tls: + - hosts: + - mongodb.local + secretName: mongodb.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: airflow.local-tls + # key: + # certificate: + +## Configure the options for init containers to be run before the main app containers +## are started. All init containers are run sequentially and must exit without errors +## for the next one to be started. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. 
+# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.11.0-debian-10-r45 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotation + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## 
https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml b/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml new file mode 100644 index 00000000..daa4475d --- /dev/null +++ b/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml @@ -0,0 +1,603 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MySQL image +## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## +image: + debug: false + registry: quay.io + repository: bitnami/mysql + tag: "{{ version }}" + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override mysql.fullname template (will maintain the release name) +## +# nameOverride: +nameOverride: {{ sanitized_name }} + +## String to fully override mysql.fullname template +## +# fullnameOverride: +fullnameOverride: {{ sanitized_name }} + +## Cluster domain +## +clusterDomain: cluster.local + +commonLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 100m + # memory: 128Mi + requests: + cpu: "{{ database_total_cpus }}" + memory: "{{ database_ram_size_in_mib }}Mi" + +## Use existing secret (ignores root, db and replication passwords) +## +# existingSecret: + +## Admin (root) credentials +## +root: + ## MySQL admin password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + password: '{{ database_password }}' + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount admin password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + +## Custom user/db credentials +## +db: + ## MySQL username and password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run + ## Note that this user should be different from the MySQL replication user (replication.user) + ## + user: '{{ database_login }}' + password: '{{ database_password }}' + ## Database to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run + ## + name: {{ sanitized_name }} + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + +## Replication configuration +## +replication: + ## Enable replication. This enables the creation of replicas of MySQL. 
If false, only a + ## master deployment would be created + ## + enabled: false + ## + ## MySQL replication user + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## Note that this user should be different from the MySQL user (db.user) + ## + user: replicator + ## MySQL replication user password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + password: + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not forced, a random password will be generated. + ## + forcePassword: true + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: false + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+# +## ConfigMap with scripts to be run at first boot +## Note: This will override initdbScripts +# initdbScriptsConfigMap: + +serviceAccount: + create: true + ## Specify the name of the service account created/used + # name: + +## Master nodes parameters +## +master: + ## Configure MySQL with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + + ## updateStrategy for master nodes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + + ## Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. 
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL master pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + + ## MySQL master containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## Example: + ## containerSecurityContext: + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: {} + + ## MySQL master containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + + ## MySQL master containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Enable persistence using PVCs on master nodes + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath: /bitnami/mysql + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + storageClass: "aws-ebs-gp2-0" + ## PVC annotations + ## + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: {{ database_disk_size_in_gib }}Gi + ## Use an existing PVC + ## + # existingClaim: + +## Slave nodes parameters +## +slave: + ## Number of slave replicas + ## + replicas: 2 + + ## Configure MySQL slave with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + + ## updateStrategy for slave nodes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + + ## Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. 
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL slave pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + + ## MySQL slave containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## Example: + ## containerSecurityContext: + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: {} + + ## MySQL slave containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + + ## MySQL slave containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Enable persistence using PVCs on slave nodes + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath: /bitnami/mysql + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## PVC annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: {{ database_disk_size_in_gib }}Gi + ## Use an existing PVC + ## + # existingClaim: + +## MySQL Service properties +## +service: + ## MySQL Service type + ## + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + name: {{ service_name }} + + ## MySQL Service port + ## + port: 3306 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: + master: + slave: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP for the MySQL Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + # master: + # slave: + +## MySQL prometheus metrics parameters +## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint +## +metrics: + enabled: false + ## Bitnami MySQL Prometheus exporter image + ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r127 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## MySQL Prometheus exporter containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 0.5 + # memory: 256Mi + requests: {} + # cpu: 0.5 + # memory: 256Mi + + ## MySQL Prometheus exporter service parameters + ## + service: + type: ClusterIP + port: 9104 + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus diff --git a/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml b/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml new file mode 100644 index 00000000..da7b7193 --- /dev/null +++ b/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml @@ -0,0 +1,572 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + tag: "0.30.0" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. 
+ # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: true + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: + client_max_body_size: 100m + proxy-body-size: 100m + server-tokens: "false" + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader-{{ id }} + + ## Name of the ingress class to route through this controller + ## + ingressClass: "{{ id }}" + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: true + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: true + namespace: "{{ namespace }}" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. 
to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: {{ nginx_minimum_replicas }} + + minAvailable: 1 + + resources: + limits: + cpu: {{ nginx_limit_cpu }} + memory: {{ nginx_limit_memory }} + requests: + cpu: {{ nginx_requests_cpu }} + memory: {{ nginx_requests_memory }} + + autoscaling: + enabled: {{ nginx_enable_horizontal_autoscaler }} + minReplicas: {{ nginx_minimum_replicas }} + maxReplicas: {{ nginx_maximum_replicas }} + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + labels: + app_id : "{{ id }}" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "Local" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
+ # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the 
all requests did return 5XX, this require your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of the all requests did return 4XX, this require your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: k8s.gcr.io/defaultbackend-amd64 + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
+ useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + 
+# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: true + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" diff --git a/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml b/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml new file mode 100644 index 00000000..3e908351 --- /dev/null +++ b/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml @@ -0,0 +1,568 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: quay.io + repository: bitnami/postgresql + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', 
else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +nameOverride: {{ sanitized_name }} + +## String to fully override postgresql.fullname template +## +fullnameOverride: {{ sanitized_name }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. 
+ # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: true + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+postgresqlPostgresPassword: '{{ database_password }}' + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: '{{ database_login }}' + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlPassword: '{{ database_password }}' + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +postgresqlDatabase: {{ database_db_name }} + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +initdbUser: postgres +initdbPassword: '{{ database_password }}' + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PostgreSQL service type + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + # clusterIP: None + port: 5432 + name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily.
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services.
+ ## + subPath: "" + + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + annotations: {} + podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + 
## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: "{{ database_ram_size_in_mib }}Mi" + cpu: "{{ database_total_cpus }}" + +## Add annotations to all the deployed resources +## +commonAnnotiations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
+ ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r116 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/lib/aws-ec2/chart_values/redis/q-values.j2.yaml b/lib/aws-ec2/chart_values/redis/q-values.j2.yaml new file mode 100644 index 00000000..007bb33a --- /dev/null +++ b/lib/aws-ec2/chart_values/redis/q-values.j2.yaml @@ -0,0 +1,788 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: 
+ # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: quay.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +nameOverride: {{ sanitized_name }} + +## String to fully override redis.fullname template +## +fullnameOverride: {{ sanitized_name }} + +## Cluster settings +cluster: + enabled: false + slaveCount: 3 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: {{ version }} + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds:
5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: true + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: '{{ database_password }}' +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. 
+ authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. 
+ # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: 
{} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + port: 6379 + name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. 
+ path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + labels: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.13.1-debian-10-r6 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true 
+ image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore b/lib/aws-ec2/charts/q-application/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore rename to lib/aws-ec2/charts/q-application/.helmignore diff --git a/lib/aws-ec2/charts/q-application/Chart.j2.yaml b/lib/aws-ec2/charts/q-application/Chart.j2.yaml new file mode 100644 index 00000000..c8a03105 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/Chart.j2.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: qovery +description: A Qovery Helm chart for Kubernetes deployments +type: application +version: 0.2.0 +appVersion: {{ helm_app_version }} +icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png diff --git a/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml b/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml new file mode 100644 index 00000000..84053b29 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml @@ -0,0 +1,92 @@ +{%- if not is_storage %} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + 
annotations: + releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %} +spec: + replicas: {{ min_instances }} + strategy: + type: RollingUpdate + {% if max_instances == 1 %} + rollingUpdate: + maxSurge: 1 + {% endif %} + selector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + template: + metadata: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - {{ sanitized_name }} + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + terminationGracePeriodSeconds: 60 + securityContext: {} + {%- if is_registry_secret %} + imagePullSecrets: + - name: {{ registry_secret }} + {%- endif %} + containers: + - name: {{ sanitized_name }} + image: "{{ image_name_with_tag }}" + env: + {%- for ev in environment_variables %} + - name: "{{ ev.key }}" + valueFrom: + secretKeyRef: + name: {{ sanitized_name }} + key: {{ ev.key }} + {%- endfor %} + {%- if private_port %} + ports: + {%- for port in ports %} + - containerPort: {{ port.port }} + name: "p{{ port.port }}" + protocol: TCP + {%- endfor %} + readinessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 20 + {%- endif %} + resources: + limits: + cpu: {{ cpu_burst }} + memory: {{ total_ram_in_mib }}Mi + requests: + cpu: {{ total_cpus }} + memory: {{ total_ram_in_mib }}Mi +{%- endif %} diff --git 
a/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml b/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml new file mode 100644 index 00000000..d14331e7 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml @@ -0,0 +1,19 @@ +{%- if not is_storage and min_instances != max_instances %} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + envId: {{ environment_id }} + appId: {{ id }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ sanitized_name }} + minReplicas: {{ min_instances }} + maxReplicas: {{ max_instances }} + targetCPUUtilizationPercentage: 60 +{%- endif %} + diff --git a/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml b/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml new file mode 100644 index 00000000..14ab70f4 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml @@ -0,0 +1,95 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-default + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + # Deny all ingress by default to this application + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-app-access + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + # Then allow some ingress to this application + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + ingress: + # Allow 
ingress from same environment + - from: + - podSelector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + + # Allow ingress from everywhere but only to application port + {% if is_private_port %} + - ports: + - port: {{ private_port }} + {% endif %} + # FIXME(sileht): Previous rule is not perfect as other pods/namespaces can + # access to the application port without going through the Ingress object, + # but that's not critical neither + # Only way to fix that is to allow lb and kube-proxy to access the namespace/pods explictly via IP, eg: + # - from: + # - ipBlock: + # cidr: 10.0.99.179/32 + # - ipBlock: + # cidr: 10.0.28.216/32 + # - ipBlock: + # cidr: 10.0.98.42/32 + # - ipBlock: + # cidr: 10.0.59.208/32 + # Since user pods, kube-proxy, and lbs are all in 10.0.0.0/8 we can't write generic rule like: + # - ipBlock: + # cidr: 0.0.0.0/0 + # except: [10.0.0.0/8] + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-deny-aws-metadata-server + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 169.254.169.254/32 diff --git a/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml b/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml new file mode 100644 index 00000000..4e8b8015 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml @@ -0,0 +1,21 @@ +{%- if not is_storage %} +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +spec: + maxUnavailable: 10% + selector: + matchLabels: + ownerId: {{ owner_id }} 
+ envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +{%- endif %} \ No newline at end of file diff --git a/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml b/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml new file mode 100644 index 00000000..86625f6e --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +type: Opaque +data: + {%- for ev in environment_variables %} + {{ ev.key }}: |- + {{ ev.value }} + {%- endfor %} diff --git a/lib/aws-ec2/charts/q-application/templates/service.j2.yaml b/lib/aws-ec2/charts/q-application/templates/service.j2.yaml new file mode 100644 index 00000000..bb258f90 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/service.j2.yaml @@ -0,0 +1,26 @@ +{%- if (ports is defined) and ports %} +apiVersion: v1 +kind: Service +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + type: ClusterIP + ports: + {%- for port in ports %} + - protocol: TCP + name: "p{{ port.port }}" + port: {{ port.port }} + targetPort: {{ port.port }} + {%- endfor %} + selector: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml b/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml new file mode 100644 index 00000000..fb7cf72b --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml @@ -0,0 +1,132 @@ +{%- if is_storage %} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ 
owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %} +spec: + replicas: {{ min_instances }} + serviceName: {{ sanitized_name }} + selector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + template: + metadata: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - {{ sanitized_name }} + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + terminationGracePeriodSeconds: 60 + securityContext: {} + {%- if is_registry_secret %} + imagePullSecrets: + - name: {{ registry_secret }} + {%- endif %} + containers: + - name: {{ sanitized_name }} + image: "{{ image_name_with_tag }}" + env: + {%- for ev in environment_variables %} + - name: "{{ ev.key }}" + valueFrom: + secretKeyRef: + name: {{ sanitized_name }} + key: {{ ev.key }} + {%- endfor %} + {%- if private_port %} + ports: + {%- for port in ports %} + - containerPort: {{ port.port }} + name: "p{{ port.port }}" + protocol: TCP + {%- endfor %} + readinessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 20 + {%- endif %} + resources: + limits: + cpu: {{ cpu_burst }} + memory: {{ total_ram_in_mib }}Mi + requests: + cpu: {{ total_cpus }} + memory: {{ total_ram_in_mib }}Mi + volumeMounts: +{%- for s in storage %} + - name: {{ s.id }} + mountPath: 
{{ s.mount_point }} +{%- endfor %} + volumeClaimTemplates: +{%- for s in storage %} +{% if clone %} + - metadata: + name: {{ s.id }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + diskId: {{ s.id }} + diskType: {{ s.storage_type }} + spec: + accessModes: + - ReadWriteOnce + storageClassName: aws-ebs-{{ s.storage_type }}-0 + dataSource: + name: {{ s.id }} + kind: PersistentVolumeClaim + resources: + requests: + storage: {{ disk.size_in_gib }}Gi +{% else %} + - metadata: + name: {{ s.id }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + diskId: {{ s.id }} + diskType: {{ s.storage_type }} + spec: + accessModes: + - ReadWriteOnce + storageClassName: aws-ebs-{{ s.storage_type }}-0 + resources: + requests: + storage: {{ s.size_in_gib }}Gi +{%- endif %} +{%- endfor %} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-application/values.j2.yaml b/lib/aws-ec2/charts/q-application/values.j2.yaml new file mode 100644 index 00000000..667115e7 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/values.j2.yaml @@ -0,0 +1,2 @@ +# Don't add anyhting here +# Jinja2 is taken on behalf of Go template diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore b/lib/aws-ec2/charts/q-ingress-tls/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore rename to lib/aws-ec2/charts/q-ingress-tls/.helmignore diff --git a/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml new file mode 100644 index 00000000..060f9a9e --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: qovery +description: A Qovery Helm chart for Kubernetes deployments +type: application +version: 0.2.0 +icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png diff --git 
a/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml new file mode 100644 index 00000000..cfb54db6 --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml @@ -0,0 +1,20 @@ +{%- if custom_domains|length > 0 %} +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: {{ id }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} +spec: + acme: + server: {{ spec_acme_server }} + email: {{ spec_acme_email }} + privateKeySecretRef: + name: acme-{{ id }}-key + solvers: + - http01: + ingress: + class: nginx-qovery +{%- endif %} diff --git a/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml new file mode 100644 index 00000000..5cf11a24 --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml @@ -0,0 +1,69 @@ +{%- if routes|length >= 1 %} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + routerName: {{ sanitized_name }} + routerId: {{ id }} + envId: {{ environment_id }} + fqdn: "{{ router_default_domain }}" + annotations: + external-dns.alpha.kubernetes.io/hostname: {{ router_default_domain }} + external-dns.alpha.kubernetes.io/ttl: "300" + kubernetes.io/tls-acme: "true" + {%- if custom_domains|length > 0 %} + cert-manager.io/issuer: {{ id }} + {%- else %} + cert-manager.io/cluster-issuer: {{ metadata_annotations_cert_manager_cluster_issuer }} + {%- endif %} + kubernetes.io/ingress.class: "nginx-qovery" + ingress.kubernetes.io/ssl-redirect: "true" + #nginx.ingress.kubernetes.io/enable-cors: "true" + #nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,x-csrftoken" + {%- if sticky_sessions_enabled == true 
%} + # https://kubernetes.github.io/ingress-nginx/examples/affinity/cookie/ + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-secure: "true" + nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE_QOVERY" + nginx.ingress.kubernetes.io/session-cookie-max-age: "85400" # 1 day + nginx.ingress.kubernetes.io/session-cookie-expires: "85400" # 1 day + nginx.ingress.kubernetes.io/session-cookie-samesite: "Lax" + {%- endif %} +spec: + tls: + {%- if custom_domains|length > 0 %} + - secretName: "router-tls-{{ id }}" + hosts: + {%- for domain in custom_domains %} + - "{{ domain.domain }}" + {%- endfor %} + {%- endif %} + # We dont use secret name as we want to rely on default tls certificate from ingress controller + # which has our wildcard certificate https://cert-manager.io/next-docs/faq/kubed/ + rules: + - host: "{{ router_default_domain }}" + http: + paths: + {%- for route in routes %} + - path: "{{ route.path }}" + backend: + serviceName: "{{ route.application_name }}" + servicePort: {{ route.application_port }} + {%- endfor %} + {%- for domain in custom_domains %} + - host: "{{ domain.domain }}" + http: + paths: + {%- for route in routes %} + - path: "{{ route.path }}" + backend: + serviceName: "{{ route.application_name }}" + servicePort: {{ route.application_port }} + {%- endfor %} + {%- endfor %} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml new file mode 100644 index 00000000..2afc8c8a --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml @@ -0,0 +1,2 @@ +# Don't add anyhting here(git hash-object -t tree /dev/null) +# Jinja2 is taken on behalf of Go template diff --git a/lib/aws-ec2/services/common/backend.j2.tf b/lib/aws-ec2/services/common/backend.j2.tf new file mode 100644 index 00000000..f0746dc0 --- /dev/null +++ b/lib/aws-ec2/services/common/backend.j2.tf @@ 
-0,0 +1,21 @@ +terraform { + backend "kubernetes" { + secret_suffix = "{{ tfstate_suffix_name }}" + load_config_file = true + config_path = "{{ kubeconfig_path }}" + namespace = "{{ namespace }}" + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws-iam-authenticator" + args = [ + "token", + "-i", + "qovery-{{kubernetes_cluster_id}}"] + env = { + AWS_ACCESS_KEY_ID = "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}" + AWS_DEFAULT_REGION = "{{ region }}" + } + } + } +} diff --git a/lib/aws-ec2/services/common/common-variables.j2.tf b/lib/aws-ec2/services/common/common-variables.j2.tf new file mode 100644 index 00000000..0d52b167 --- /dev/null +++ b/lib/aws-ec2/services/common/common-variables.j2.tf @@ -0,0 +1,167 @@ +# Qovery +variable "cluster_name" { + description = "Kubernetes cluster name" + default = "{{ cluster_name }}" + type = string +} + +variable "region" { + description = "AWS region to store terraform state and lock" + default = "{{ region }}" + type = string +} + +variable "kubernetes_cluster_id" { + description = "Kubernetes cluster name with region" + default = "{{ kubernetes_cluster_id }}" + type = string +} + +variable "region_cluster_name" { + description = "AWS region to store terraform state and lock" + default = "{{ region }}-{{ cluster_name }}" + type = string +} + +variable "q_project_id" { + description = "Qovery project ID" + default = "{{ project_id }}" + type = string +} + +variable "q_customer_id" { + description = "Qovery customer ID" + default = "{{ owner_id }}" + type = string +} + +variable "q_environment_id" { + description = "Qovery client environment" + default = "{{ environment_id }}" + type = string +} + +variable "database_tags" { + description = "Qovery database tags" + default = { + "cluster_name" = "{{ cluster_name }}" + "cluster_id" = "{{ kubernetes_cluster_id }}" + "region" = "{{ region }}" + "q_client_id" = "{{ owner_id }}" + "q_environment_id" = "{{ environment_id }}" + 
"q_project_id" = "{{ project_id }}" + {% if resource_expiration_in_seconds is defined %} + "ttl" = "{{ resource_expiration_in_seconds }}" + {% endif %} + {% if snapshot is defined and snapshot["snapshot_id"] %} meta_last_restored_from = { { snapshot['snapshot_id'] } } + {% endif %} + } + type = map +} + +{%- if resource_expiration_in_seconds is defined %} +# Pleco ttl +variable "resource_expiration_in_seconds" { + description = "Resource expiration in seconds" + default = {{resource_expiration_in_seconds}} + type = number +} +{% endif %} + +{%- if snapshot is defined %} +# Snapshots +variable "snapshot_identifier" { + description = "Snapshot ID to restore" + default = "{{ snapshot['snapshot_id']}}" + type = string +} +{% endif %} + +# Network + +variable "publicly_accessible" { + description = "Instance publicly accessible" + default = {{ publicly_accessible }} + type = bool +} + +variable "multi_az" { + description = "Multi availability zones" + default = true + type = bool +} + +# Upgrades + +variable "auto_minor_version_upgrade" { + description = "Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window" + default = true + type = bool +} + +variable "apply_changes_now" { + description = "Apply changes now or during the during the maintenance window" + default = false + type = bool +} + +variable "preferred_maintenance_window" { + description = "Maintenance window" + default = "Tue:02:00-Tue:04:00" + type = string +} + +# Monitoring + +variable "performance_insights_enabled" { + description = "Specifies whether Performance Insights are enabled" + default = true + type = bool +} + +variable "performance_insights_enabled_retention" { + description = "The amount of time in days to retain Performance Insights data" + default = 7 + type = number +} + +# Backups + +variable "backup_retention_period" { + description = "Backup retention period" + default = 14 + type = number +} + +variable 
"preferred_backup_window" { + description = "Maintenance window" + default = "00:00-01:00" + type = string +} + +variable "delete_automated_backups" { + description = "Delete automated backups" + default = {{delete_automated_backups}} + type = bool +} + +variable "skip_final_snapshot" { + description = "Skip final snapshot" + default = {{ skip_final_snapshot }} + type = bool +} + +variable "final_snapshot_name" { + description = "Name of the final snapshot before the database goes deleted" + default = "{{ final_snapshot_name }}" + type = string +} + +{%- if snapshot is defined %} +# Snapshots +variable "snapshot_identifier" { + description = "Snapshot ID to restore" + default = "{{ snapshot['snapshot_id']}}" + type = string +} +{% endif %} \ No newline at end of file diff --git a/lib/aws-ec2/services/common/providers.j2.tf b/lib/aws-ec2/services/common/providers.j2.tf new file mode 100644 index 00000000..95abc7b1 --- /dev/null +++ b/lib/aws-ec2/services/common/providers.j2.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 3.36.0" + } + helm = { + source = "hashicorp/helm" + version = "~> 1.3.2" + } + local = { + source = "hashicorp/local" + version = "~> 1.4" + } + time = { + source = "hashicorp/time" + version = "~> 0.3" + } + } + required_version = ">= 0.14" +} + +provider "aws" { + profile = "default" + region = "{{ region }}" + access_key = "{{ aws_access_key }}" + secret_key = "{{ aws_secret_key }}" +} + +data aws_eks_cluster eks_cluster { + name = "qovery-{{kubernetes_cluster_id}}" +} + +provider "helm" { + kubernetes { + host = data.aws_eks_cluster.eks_cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data) + load_config_file = false + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws-iam-authenticator" + args = ["token", "-i", "qovery-{{kubernetes_cluster_id}}"] + env = { + AWS_ACCESS_KEY_ID = "{{ 
aws_access_key }}" + AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}" + AWS_DEFAULT_REGION = "{{ region }}" + } + } + } +} + +resource "time_static" "on_db_create" {} diff --git a/lib/aws-ec2/services/mongodb/local-vars.j2.tf b/lib/aws-ec2/services/mongodb/local-vars.j2.tf new file mode 100644 index 00000000..4ea911fc --- /dev/null +++ b/lib/aws-ec2/services/mongodb/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + mongodb_database_tags = merge (var.database_tags, { + database_identifier = var.documentdb_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mongodb/main.j2.tf b/lib/aws-ec2/services/mongodb/main.j2.tf new file mode 100644 index 00000000..8f20c381 --- /dev/null +++ b/lib/aws-ec2/services/mongodb/main.j2.tf @@ -0,0 +1,114 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["DocumentDB"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +resource "helm_release" "documentdb_instance_external_name" { + name = "${aws_docdb_cluster.documentdb_cluster.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_docdb_cluster.documentdb_cluster.endpoint + } + + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + + set { + name = "app_id" + value = "{{database_id}}" + } + + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_docdb_cluster.documentdb_cluster + ] +} + +resource "aws_docdb_cluster_instance" 
"documentdb_cluster_instances" { + count = var.documentdb_instances_number + + cluster_identifier = aws_docdb_cluster.documentdb_cluster.id + identifier = "${var.documentdb_identifier}-${count.index}" + + instance_class = var.instance_class + + # Maintenance and upgrade + auto_minor_version_upgrade = var.auto_minor_version_upgrade + preferred_maintenance_window = var.preferred_maintenance_window + + tags = local.mongodb_database_tags +} + +resource "aws_docdb_cluster" "documentdb_cluster" { + cluster_identifier = var.documentdb_identifier + + tags = local.mongodb_database_tags + + # DocumentDB instance basics + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + master_password = var.password + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + master_username = var.username + engine = "docdb" + {%- endif %} + storage_encrypted = var.encrypt_disk + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + + # Backups + backup_retention_period = var.backup_retention_period + preferred_backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} +} diff --git a/lib/aws-ec2/services/mongodb/variables.j2.tf b/lib/aws-ec2/services/mongodb/variables.j2.tf new file mode 100644 index 00000000..c2d5a36f --- /dev/null +++ b/lib/aws-ec2/services/mongodb/variables.j2.tf @@ -0,0 +1,43 @@ +# documentdb instance basics + +variable "documentdb_identifier" { + description = "Documentdb cluster name (Cluster identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "documentdb_instances_number" { + description = "DocumentDB instance numbers" + default = 1 + type 
= number +} + +variable "port" { + description = "Documentdb instance port" + default = {{ database_port }} + type = number +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{database_instance_type}}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mysql/local-vars.j2.tf b/lib/aws-ec2/services/mysql/local-vars.j2.tf new file mode 100644 index 00000000..0f6731e1 --- /dev/null +++ b/lib/aws-ec2/services/mysql/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + mysql_database_tags = merge (var.database_tags, { + database_identifier = var.mysql_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mysql/main.j2.tf b/lib/aws-ec2/services/mysql/main.j2.tf new file mode 100644 index 00000000..68e84b13 --- /dev/null +++ b/lib/aws-ec2/services/mysql/main.j2.tf @@ -0,0 +1,132 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["RDS"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +data "aws_iam_role" "rds_enhanced_monitoring" { + 
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}" +} + +resource "helm_release" "mysql_instance_external_name" { + name = "${aws_db_instance.mysql_instance.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_db_instance.mysql_instance.address + } + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + set { + name = "app_id" + value = "{{database_id}}" + } + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_db_instance.mysql_instance + ] +} + +resource "aws_db_parameter_group" "mysql_parameter_group" { + name = "qovery-${var.mysql_identifier}" + family = var.parameter_group_family + + tags = local.mysql_database_tags + + # Set superuser permission to the default 'username' account + parameter { + name = "log_bin_trust_function_creators" + value = "1" + } +} + +# Non snapshoted version +resource "aws_db_instance" "mysql_instance" { + identifier = var.mysql_identifier + + tags = local.mysql_database_tags + + # MySQL instance basics + instance_class = var.instance_class + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + password = var.password + name = var.database_name + parameter_group_name = aws_db_parameter_group.mysql_parameter_group.name + storage_encrypted = var.encrypt_disk + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + allocated_storage = var.disk_size + storage_type = var.storage_type + username = var.username + engine_version = var.mysql_version + engine = "mysql" + ca_cert_identifier = "rds-ca-2019" + {%- endif %} + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + publicly_accessible = var.publicly_accessible + multi_az = var.multi_az + + # Maintenance and 
upgrades + apply_immediately = var.apply_changes_now + auto_minor_version_upgrade = var.auto_minor_version_upgrade + maintenance_window = var.preferred_maintenance_window + + # Monitoring + monitoring_interval = 10 + monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn + + # Backups + backup_retention_period = var.backup_retention_period + backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} + copy_tags_to_snapshot = true + delete_automated_backups = var.delete_automated_backups + +} diff --git a/lib/aws-ec2/services/mysql/variables.j2.tf b/lib/aws-ec2/services/mysql/variables.j2.tf new file mode 100644 index 00000000..84d6e420 --- /dev/null +++ b/lib/aws-ec2/services/mysql/variables.j2.tf @@ -0,0 +1,67 @@ +# MySQL instance basics + +variable "mysql_identifier" { + description = "MySQL instance name (DB identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "port" { + description = "MySQL instance port" + default = {{ database_port }} + type = number +} + +variable "disk_size" { + description = "disk instance size" + default = {{ database_disk_size_in_gib }} + type = number +} + +variable "mysql_version" { + description = "MySQL version" + default = "{{ version }}" + type = string +} + +variable "parameter_group_family" { + description = "RDS parameter group family" + default = "{{ parameter_group_family }}" + type = string +} + +variable "storage_type" { + description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)." 
+ default = "{{ database_disk_type }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{database_instance_type}}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "database_name" { + description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance" + default = "{{ database_name }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/postgresql/local-vars.j2.tf b/lib/aws-ec2/services/postgresql/local-vars.j2.tf new file mode 100644 index 00000000..bf86e787 --- /dev/null +++ b/lib/aws-ec2/services/postgresql/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + postgres_database_tags = merge (var.database_tags, { + database_identifier = var.postgresql_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/postgresql/main.j2.tf b/lib/aws-ec2/services/postgresql/main.j2.tf new file mode 100644 index 00000000..9c0517db --- /dev/null +++ b/lib/aws-ec2/services/postgresql/main.j2.tf @@ -0,0 +1,121 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["RDS"] + } +} + +data "aws_security_group" "selected" { + filter { + 
name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +data "aws_iam_role" "rds_enhanced_monitoring" { + name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}" +} + +resource "helm_release" "postgres_instance_external_name" { + name = "${aws_db_instance.postgresql_instance.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_db_instance.postgresql_instance.address + } + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + set { + name = "app_id" + value = "{{database_id}}" + } + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_db_instance.postgresql_instance + ] +} + + +# Non snapshoted version +resource "aws_db_instance" "postgresql_instance" { + identifier = var.postgresql_identifier + + tags = local.postgres_database_tags + + # Postgres instance basics + instance_class = var.instance_class + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + password = var.password + storage_encrypted = var.encrypt_disk + {%- if snapshot and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + allocated_storage = var.disk_size + name = var.database_name + storage_type = var.storage_type + username = var.username + engine_version = var.postgresql_version + engine = "postgres" + ca_cert_identifier = "rds-ca-2019" + {%- endif %} + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + publicly_accessible = var.publicly_accessible + multi_az = var.multi_az + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + auto_minor_version_upgrade = var.auto_minor_version_upgrade + maintenance_window = 
var.preferred_maintenance_window + + # Monitoring + performance_insights_enabled = var.performance_insights_enabled + performance_insights_retention_period = var.performance_insights_enabled_retention + monitoring_interval = 10 + monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn + + # Backups + backup_retention_period = var.backup_retention_period + backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} + copy_tags_to_snapshot = true + delete_automated_backups = var.delete_automated_backups + +} diff --git a/lib/aws-ec2/services/postgresql/variables.j2.tf b/lib/aws-ec2/services/postgresql/variables.j2.tf new file mode 100644 index 00000000..9feac6cd --- /dev/null +++ b/lib/aws-ec2/services/postgresql/variables.j2.tf @@ -0,0 +1,61 @@ +# PostgreSQL instance basics + +variable "postgresql_identifier" { + description = "PostgreSQL instance name (DB identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "port" { + description = "PostgreSQL instance port" + default = "{{ database_port }}" + type = number +} + +variable "disk_size" { + description = "disk instance size" + default = "{{ database_disk_size_in_gib }}" + type = number +} + +variable "postgresql_version" { + description = "Postgresql version" + default = "{{ version }}" + type = string +} + +variable "storage_type" { + description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)." 
+ default = "{{ database_disk_type }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{ database_instance_type }}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "database_name" { + description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance" + default = "{{ database_name }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/redis/local-vars.j2.tf b/lib/aws-ec2/services/redis/local-vars.j2.tf new file mode 100644 index 00000000..96f2d53d --- /dev/null +++ b/lib/aws-ec2/services/redis/local-vars.j2.tf @@ -0,0 +1,7 @@ +locals { + redis_database_tags = merge (var.database_tags, { + database_identifier = var.elasticache_identifier + creationDate = time_static.on_db_create.rfc3339 + {% if snapshot is defined and snapshot["snapshot_id"] %}meta_last_restored_from = var.snapshot_identifier{% endif %} + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/redis/main.j2.tf b/lib/aws-ec2/services/redis/main.j2.tf new file mode 100644 index 00000000..98b2da8d --- /dev/null +++ b/lib/aws-ec2/services/redis/main.j2.tf @@ -0,0 +1,114 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "selected" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" 
+ values = ["Elasticache"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +resource "helm_release" "elasticache_instance_external_name" { + name = "${aws_elasticache_cluster.elasticache_cluster.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_elasticache_cluster.elasticache_cluster.cache_nodes.0.address + } + + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + + set { + name = "app_id" + value = "{{database_id}}" + } + + set { + name = "service_name" + value = "{{service_name}}" + } + + set { + name = "publicly_accessible" + value = var.publicly_accessible + } + + depends_on = [ + aws_elasticache_cluster.elasticache_cluster + ] +} + +resource "aws_elasticache_cluster" "elasticache_cluster" { + cluster_id = var.elasticache_identifier + + tags = local.redis_database_tags + + # Elasticache instance basics + port = var.port + engine_version = var.elasticache_version + # Thanks GOD AWS for not using SemVer and adding your own versioning system, + # need to add this dirty trick while Hashicorp fix this issue + # https://github.com/hashicorp/terraform-provider-aws/issues/15625 + lifecycle { + ignore_changes = [engine_version] + } + + {%- if replication_group_id is defined %} + # todo: add cluster mode and replicas support + {%- else %} + engine = "redis" + node_type = var.instance_class + num_cache_nodes = var.elasticache_instances_number + parameter_group_name = var.parameter_group_name + {%- endif %} + + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_name = var.snapshot_identifier + {%- endif %} + + # Network + # WARNING: this value cna't get fetch from data sources and is linked to the bootstrap phase + subnet_group_name = 
"elasticache-${data.aws_vpc.selected.id}" + + # Security + security_group_ids = data.aws_security_group.selected.*.id + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + maintenance_window = var.preferred_maintenance_window + + # Backups + snapshot_window = var.preferred_backup_window + snapshot_retention_limit = var.backup_retention_period + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} +} diff --git a/lib/aws-ec2/services/redis/variables.j2.tf b/lib/aws-ec2/services/redis/variables.j2.tf new file mode 100644 index 00000000..2383a83e --- /dev/null +++ b/lib/aws-ec2/services/redis/variables.j2.tf @@ -0,0 +1,37 @@ +# elasticache instance basics + +variable "elasticache_identifier" { + description = "Elasticache cluster name (Cluster identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "elasticache_version" { + description = "Elasticache version" + default = "{{ version }}" + type = string +} + +variable "parameter_group_name" { + description = "Elasticache parameter group name" + default = "{{ database_elasticache_parameter_group_name }}" + type = string +} + +variable "elasticache_instances_number" { + description = "Elasticache instance numbers" + default = 1 + type = number +} + +variable "port" { + description = "Elasticache instance port" + default = {{ database_port }} + type = number +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html" + default = "{{database_instance_type}}" + type = string +} \ No newline at end of file diff --git a/lib/aws/bootstrap-eks/README.md b/lib/aws/bootstrap/README.md similarity index 100% rename from lib/aws/bootstrap-eks/README.md rename to lib/aws/bootstrap/README.md diff --git a/lib/aws/bootstrap-eks/backend.j2.tf b/lib/aws/bootstrap/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/backend.j2.tf rename 
to lib/aws/bootstrap/backend.j2.tf diff --git a/lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml b/lib/aws/bootstrap/chart_values/external-dns.j2.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml rename to lib/aws/bootstrap/chart_values/external-dns.j2.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml b/lib/aws/bootstrap/chart_values/grafana.j2.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml rename to lib/aws/bootstrap/chart_values/grafana.j2.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml b/lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml rename to lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/loki.yaml b/lib/aws/bootstrap/chart_values/loki.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/loki.yaml rename to lib/aws/bootstrap/chart_values/loki.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/metrics-server.yaml b/lib/aws/bootstrap/chart_values/metrics-server.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/metrics-server.yaml rename to lib/aws/bootstrap/chart_values/metrics-server.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml b/lib/aws/bootstrap/chart_values/nginx-ingress.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml rename to lib/aws/bootstrap/chart_values/nginx-ingress.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/pleco.yaml b/lib/aws/bootstrap/chart_values/pleco.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/pleco.yaml rename to lib/aws/bootstrap/chart_values/pleco.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/.helmignore b/lib/aws/bootstrap/charts/aws-calico/.helmignore similarity index 
100% rename from lib/aws/bootstrap-eks/charts/aws-calico/.helmignore rename to lib/aws/bootstrap/charts/aws-calico/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml rename to lib/aws/bootstrap/charts/aws-calico/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/README.md b/lib/aws/bootstrap/charts/aws-calico/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/README.md rename to lib/aws/bootstrap/charts/aws-calico/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml b/lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml rename to lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml similarity index 
100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/service.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/values.yaml b/lib/aws/bootstrap/charts/aws-calico/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/values.yaml rename 
to lib/aws/bootstrap/charts/aws-calico/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore b/lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore rename to lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml rename to 
lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore rename to lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md rename to 
lib/aws/bootstrap/charts/aws-node-termination-handler/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml diff 
--git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml b/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml b/lib/aws/bootstrap/charts/aws-ui-view/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md rename to lib/aws/bootstrap/charts/aws-vpc-cni/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml similarity index 100% rename from 
lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/.helmignore b/lib/aws/bootstrap/charts/coredns-config/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/.helmignore rename to 
lib/aws/bootstrap/charts/coredns-config/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml b/lib/aws/bootstrap/charts/coredns-config/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml rename to lib/aws/bootstrap/charts/coredns-config/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl b/lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml b/lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml rename to lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/values.yaml b/lib/aws/bootstrap/charts/coredns-config/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/values.yaml rename to lib/aws/bootstrap/charts/coredns-config/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore b/lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl similarity index 
100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore b/lib/aws/bootstrap/charts/q-storageclass/.helmignore similarity index 100% rename from 
lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore rename to lib/aws/bootstrap/charts/q-storageclass/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml b/lib/aws/bootstrap/charts/q-storageclass/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml rename to lib/aws/bootstrap/charts/q-storageclass/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl b/lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml b/lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml rename to lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml b/lib/aws/bootstrap/charts/q-storageclass/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml rename to lib/aws/bootstrap/charts/q-storageclass/values.yaml diff --git a/lib/aws/bootstrap-eks/documentdb.tf b/lib/aws/bootstrap/documentdb.tf similarity index 100% rename from lib/aws/bootstrap-eks/documentdb.tf rename to lib/aws/bootstrap/documentdb.tf diff --git a/lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf b/lib/aws/bootstrap/eks-ebs-csi-driver.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf rename to lib/aws/bootstrap/eks-ebs-csi-driver.tf diff --git a/lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf b/lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf rename to lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf 
diff --git a/lib/aws/bootstrap-eks/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-cluster.j2.tf rename to lib/aws/bootstrap/eks-master-cluster.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-master-iam.tf b/lib/aws/bootstrap/eks-master-iam.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-iam.tf rename to lib/aws/bootstrap/eks-master-iam.tf diff --git a/lib/aws/bootstrap-eks/eks-master-sec-group.tf b/lib/aws/bootstrap/eks-master-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-sec-group.tf rename to lib/aws/bootstrap/eks-master-sec-group.tf diff --git a/lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf b/lib/aws/bootstrap/eks-s3-kubeconfig-store.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf rename to lib/aws/bootstrap/eks-s3-kubeconfig-store.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf b/lib/aws/bootstrap/eks-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-common.j2.tf rename to lib/aws/bootstrap/eks-vpc-common.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf b/lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf rename to lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf rename to lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-iam.tf b/lib/aws/bootstrap/eks-workers-iam.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-iam.tf rename to lib/aws/bootstrap/eks-workers-iam.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf 
b/lib/aws/bootstrap/eks-workers-nodes.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf rename to lib/aws/bootstrap/eks-workers-nodes.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-sec-group.tf b/lib/aws/bootstrap/eks-workers-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-sec-group.tf rename to lib/aws/bootstrap/eks-workers-sec-group.tf diff --git a/lib/aws/bootstrap-eks/elasticcache.tf b/lib/aws/bootstrap/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap-eks/elasticcache.tf rename to lib/aws/bootstrap/elasticcache.tf diff --git a/lib/aws/bootstrap-eks/elasticsearch.tf b/lib/aws/bootstrap/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap-eks/elasticsearch.tf rename to lib/aws/bootstrap/elasticsearch.tf diff --git a/lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf b/lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf rename to lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf diff --git a/lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf b/lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf rename to lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-grafana.j2.tf b/lib/aws/bootstrap/helm-grafana.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-grafana.j2.tf rename to lib/aws/bootstrap/helm-grafana.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-loki.j2.tf b/lib/aws/bootstrap/helm-loki.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-loki.j2.tf rename to lib/aws/bootstrap/helm-loki.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-nginx-ingress.tf b/lib/aws/bootstrap/helm-nginx-ingress.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-nginx-ingress.tf rename to lib/aws/bootstrap/helm-nginx-ingress.tf 
diff --git a/lib/aws/bootstrap-eks/helper.j2.sh b/lib/aws/bootstrap/helper.j2.sh similarity index 100% rename from lib/aws/bootstrap-eks/helper.j2.sh rename to lib/aws/bootstrap/helper.j2.sh diff --git a/lib/aws/bootstrap-eks/qovery-tf-config.j2.tf b/lib/aws/bootstrap/qovery-tf-config.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/qovery-tf-config.j2.tf rename to lib/aws/bootstrap/qovery-tf-config.j2.tf diff --git a/lib/aws/bootstrap-eks/qovery-vault.j2.tf b/lib/aws/bootstrap/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/qovery-vault.j2.tf rename to lib/aws/bootstrap/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap-eks/rds.tf b/lib/aws/bootstrap/rds.tf similarity index 100% rename from lib/aws/bootstrap-eks/rds.tf rename to lib/aws/bootstrap/rds.tf diff --git a/lib/aws/bootstrap-eks/s3-qovery-buckets.tf b/lib/aws/bootstrap/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap-eks/s3-qovery-buckets.tf rename to lib/aws/bootstrap/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap-eks/tf-default-vars.j2.tf b/lib/aws/bootstrap/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/tf-default-vars.j2.tf rename to lib/aws/bootstrap/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap-eks/tf-providers-aws.j2.tf b/lib/aws/bootstrap/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/tf-providers-aws.j2.tf rename to lib/aws/bootstrap/tf-providers-aws.j2.tf diff --git a/lib/edge/aws/backend.j2.tf b/lib/edge/aws/backend.j2.tf deleted file mode 100644 index a1418800..00000000 --- a/lib/edge/aws/backend.j2.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - backend "s3" { - access_key = "{{ aws_access_key_tfstates_account }}" - secret_key = "{{ aws_secret_key_tfstates_account }}" - bucket = "{{ aws_terraform_backend_bucket }}" - key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" - dynamodb_table = "{{ aws_terraform_backend_dynamodb_table 
}}" - region = "{{ aws_region_tfstates_account }}" - } -} diff --git a/lib/edge/aws/documentdb.tf b/lib/edge/aws/documentdb.tf deleted file mode 100644 index ea04fec0..00000000 --- a/lib/edge/aws/documentdb.tf +++ /dev/null @@ -1,81 +0,0 @@ -locals { - tags_documentdb = merge( - aws_eks_cluster.eks_cluster.tags, - { - "Service" = "DocumentDB" - } - ) -} - -# Network - -resource "aws_subnet" "documentdb_zone_a" { - count = length(var.documentdb_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.documentdb_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_subnet" "documentdb_zone_b" { - count = length(var.documentdb_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.documentdb_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_subnet" "documentdb_zone_c" { - count = length(var.documentdb_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.documentdb_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_route_table_association" "documentdb_cluster_zone_a" { - count = length(var.documentdb_subnets_zone_a) - - subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "documentdb_cluster_zone_b" { - count = length(var.documentdb_subnets_zone_b) - - subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "documentdb_cluster_zone_c" { - count = length(var.documentdb_subnets_zone_c) - - subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_docdb_subnet_group" "documentdb" { - description = "DocumentDB linked to 
${var.kubernetes_cluster_id}" - name = "documentdb-${aws_vpc.eks.id}" - subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) - - tags = local.tags_documentdb -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "documentdb_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow DocumentDB incoming access from anywhere" - from_port = 27017 - protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id - to_port = 27017 - type = "ingress" -} diff --git a/lib/edge/aws/eks-vpc-common.j2.tf b/lib/edge/aws/eks-vpc-common.j2.tf deleted file mode 100644 index 63b91880..00000000 --- a/lib/edge/aws/eks-vpc-common.j2.tf +++ /dev/null @@ -1,42 +0,0 @@ -data "aws_availability_zones" "available" {} - -locals { - tags_eks_vpc = merge( - local.tags_common, - { - Name = "qovery-eks-workers", - "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", - "kubernetes.io/role/elb" = 1, - {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} - } - ) - - tags_eks_vpc_public = merge( - local.tags_eks_vpc, - { - "Public" = "true" - } - ) - - tags_eks_vpc_private = merge( - local.tags_eks, - { - "Public" = "false" - } - ) -} - -# VPC -resource "aws_vpc" "eks" { - cidr_block = var.vpc_cidr_block - enable_dns_hostnames = true - - tags = local.tags_eks_vpc -} - -# Internet gateway -resource "aws_internet_gateway" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - tags = local.tags_eks_vpc -} \ No newline at end of file diff --git a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf deleted file mode 100644 index d0174308..00000000 --- a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf +++ /dev/null @@ -1,75 +0,0 @@ -{% if vpc_qovery_network_mode == "WithoutNatGateways" %} -# Public subnets -resource "aws_subnet" "eks_zone_a" { - count = 
length(var.eks_subnets_zone_a_private) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.eks_subnets_zone_a_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.eks_subnets_zone_b_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.eks_subnets_zone_c_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_route_table" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.eks_cluster.id - } - - // todo(pmavro): add tests for it when it will be available in the SDK - {% for route in vpc_custom_routing_table %} - route { - cidr_block = "{{ route.destination }}" - gateway_id = "{{ route.target }}" - } - {% endfor %} - - tags = local.tags_eks_vpc -} - -resource "aws_route_table_association" "eks_cluster_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - subnet_id = aws_subnet.eks_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - subnet_id = aws_subnet.eks_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - subnet_id = aws_subnet.eks_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} -{% endif %} \ No newline at end of file diff --git 
a/lib/edge/aws/elasticcache.tf b/lib/edge/aws/elasticcache.tf deleted file mode 100644 index 44073c63..00000000 --- a/lib/edge/aws/elasticcache.tf +++ /dev/null @@ -1,80 +0,0 @@ -locals { - tags_elasticache = merge( - aws_eks_cluster.eks_cluster.tags, - { - "Service" = "Elasticache" - } - ) -} - -# Network - -resource "aws_subnet" "elasticache_zone_a" { - count = length(var.elasticache_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.elasticache_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_subnet" "elasticache_zone_b" { - count = length(var.elasticache_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.elasticache_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_subnet" "elasticache_zone_c" { - count = length(var.elasticache_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.elasticache_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_route_table_association" "elasticache_cluster_zone_a" { - count = length(var.elasticache_subnets_zone_a) - - subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticache_cluster_zone_b" { - count = length(var.elasticache_subnets_zone_b) - - subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticache_cluster_zone_c" { - count = length(var.elasticache_subnets_zone_c) - - subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_elasticache_subnet_group" "elasticache" { - description = "Elasticache linked to ${var.kubernetes_cluster_id}" - # WARNING: this "name" 
value is used into elasticache clusters, you need to update it accordingly - name = "elasticache-${aws_vpc.eks.id}" - subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "elasticache_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow Redis incoming access from anywhere" - from_port = 6379 - protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id - to_port = 6379 - type = "ingress" -} diff --git a/lib/edge/aws/elasticsearch.tf b/lib/edge/aws/elasticsearch.tf deleted file mode 100644 index f5e873dd..00000000 --- a/lib/edge/aws/elasticsearch.tf +++ /dev/null @@ -1,79 +0,0 @@ -locals { - tags_elasticsearch = merge( - local.tags_eks, - { - "Service" = "Elasticsearch" - } - ) -} - -# Network - -resource "aws_subnet" "elasticsearch_zone_a" { - count = length(var.elasticsearch_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.elasticsearch_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_subnet" "elasticsearch_zone_b" { - count = length(var.elasticsearch_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.elasticsearch_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_subnet" "elasticsearch_zone_c" { - count = length(var.elasticsearch_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.elasticsearch_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { - count = length(var.elasticsearch_subnets_zone_a) - - subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - 
-resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { - count = length(var.elasticsearch_subnets_zone_b) - - subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { - count = length(var.elasticsearch_subnets_zone_c) - - subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_security_group" "elasticsearch" { - name = "elasticsearch-${var.kubernetes_cluster_id}" - description = "Elasticsearch security group" - vpc_id = aws_vpc.eks.id - - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - - cidr_blocks = [ - aws_vpc.eks.cidr_block - ] - } - - tags = local.tags_elasticsearch -} diff --git a/lib/edge/aws/qovery-vault.j2.tf b/lib/edge/aws/qovery-vault.j2.tf deleted file mode 100644 index b12afa38..00000000 --- a/lib/edge/aws/qovery-vault.j2.tf +++ /dev/null @@ -1,29 +0,0 @@ -locals { - kubeconfig_base64 = base64encode(local.kubeconfig) -} -// do not run for tests clusters to avoid uncleaned info. 
-// do not try to use count into resource, it will fails trying to connect to vault -{% if vault_auth_method != "none" and not test_cluster %} -resource "vault_generic_secret" "cluster-access" { - path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" - - data_json = <, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + template_directory: String, + options: Options, + listeners: Listeners, + logger: Box, +} + +impl EC2 { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + logger: Box, + ) -> Result { + let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws-ec2/bootstrap", context.lib_root_dir()); + + let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; + let s3 = kubernetes::s3(&context, ®ion, &**cloud_provider); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EC2 { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + template_directory, + logger, + listeners, + }) + } + + fn cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } +} + +impl Kubernetes for EC2 { + fn context(&self) -> &Context { + &self.context + } + + fn kind(&self) -> Kind { + Kind::Ec2 + } + + fn id(&self) -> &str { + self.id.as_str() + } + + fn name(&self) -> &str { + self.name.as_str() + } + + fn version(&self) -> &str { + self.version.as_str() + } + + fn region(&self) -> String { + self.region.to_aws_format() + } + + fn zone(&self) -> &str { + "" + } + + fn aws_zones(&self) -> Option> { + Some(self.zones.clone()) + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + 
(*self.cloud_provider).borrow() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + (*self.dns_provider).borrow() + } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + &self.s3 + } + + fn is_valid(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_create(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + kubernetes::create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &vec![], + &self.options, + ) + }) + } + + #[named] + fn on_create_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::create_error(self)) + } + + fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + // TODO + Ok(()) + } + + #[named] + fn on_upgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) + } + + #[named] + fn on_upgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + 
self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::upgrade_error(self)) + } + + #[named] + fn on_downgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, kubernetes::downgrade) + } + + #[named] + fn on_downgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::downgrade_error(self)) + } + + #[named] + fn on_pause(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || { + kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) + } + + #[named] + fn on_pause_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || kubernetes::pause_error(self)) + } + + #[named] + fn on_delete(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + 
self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) + } + + #[named] + fn on_delete_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || kubernetes::delete_error(self)) + } + + #[named] + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::pause_environment(self, environment, 
event_details, self.logger()) + } + + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } + + #[named] + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::delete_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } +} + +impl Listen for EC2 { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/aws/kubernetes/eks.rs b/src/cloud_provider/aws/kubernetes/eks.rs new file mode 100644 index 00000000..004923db --- /dev/null +++ b/src/cloud_provider/aws/kubernetes/eks.rs @@ -0,0 +1,670 @@ +use crate::cloud_provider; +use crate::cloud_provider::aws::kubernetes; +use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType; +use crate::cloud_provider::aws::kubernetes::Options; +use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; +use crate::cloud_provider::environment::Environment; +use crate::cloud_provider::kubernetes::{ + send_progress_on_long_task, Kind, Kubernetes, 
KubernetesNodesType, KubernetesUpgradeStatus, +}; +use crate::cloud_provider::models::NodeGroups; +use crate::cloud_provider::utilities::print_action; +use crate::cloud_provider::CloudProvider; +use crate::cmd::kubectl::{kubectl_exec_scale_replicas, ScalingKind}; +use crate::cmd::terraform::terraform_init_validate_plan_apply; +use crate::dns_provider::DnsProvider; +use crate::errors::EngineError; +use crate::events::Stage::Infrastructure; +use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage}; +use crate::io_models::{Action, Context, Listen, Listener, Listeners, ListenersHelper}; +use crate::logger::Logger; +use crate::object_storage::s3::S3; +use crate::object_storage::ObjectStorage; +use function_name::named; +use std::borrow::Borrow; +use std::str::FromStr; +use std::sync::Arc; + +pub struct EKS { + context: Context, + id: String, + long_id: uuid::Uuid, + name: String, + version: String, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + nodes_groups: Vec, + template_directory: String, + options: Options, + listeners: Listeners, + logger: Box, +} + +impl EKS { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + nodes_groups: Vec, + logger: Box, + ) -> Result { + let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + + let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; + + for node_group in &nodes_groups { + if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { + let err = + EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); + + logger.log(EngineEvent::Error(err.clone(), None)); + + return Err(err); + } + } + + let s3 = 
kubernetes::s3(&context, ®ion, &**cloud_provider); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EKS { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + nodes_groups, + template_directory, + logger, + listeners, + }) + } + + fn set_cluster_autoscaler_replicas( + &self, + event_details: EventDetails, + replicas_count: u32, + ) -> Result<(), EngineError> { + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), + )); + let (kubeconfig_path, _) = self.get_kubeconfig_file()?; + let selector = "cluster-autoscaler-aws-cluster-autoscaler"; + let namespace = "kube-system"; + let _ = kubectl_exec_scale_replicas( + kubeconfig_path, + self.cloud_provider().credentials_environment_variables(), + namespace, + ScalingKind::Deployment, + selector, + replicas_count, + ) + .map_err(|e| { + EngineError::new_k8s_scale_replicas( + event_details.clone(), + selector.to_string(), + namespace.to_string(), + replicas_count, + e, + ) + })?; + + Ok(()) + } + + fn cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } +} + +impl Kubernetes for EKS { + fn context(&self) -> &Context { + &self.context + } + + fn kind(&self) -> Kind { + Kind::Eks + } + + fn id(&self) -> &str { + self.id.as_str() + } + + fn name(&self) -> &str { + self.name.as_str() + } + + fn version(&self) -> &str { + self.version.as_str() + } + + fn region(&self) -> String { + self.region.to_aws_format() + } + + fn zone(&self) -> &str { + "" + } + + fn aws_zones(&self) -> Option> { + Some(self.zones.clone()) + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + (*self.cloud_provider).borrow() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + (*self.dns_provider).borrow() + } + + fn 
logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + &self.s3 + } + + fn is_valid(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_create(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + kubernetes::create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_create_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::create_error(self)) + } + + fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); + let listeners_helper = ListenersHelper::new(&self.listeners); + + self.send_to_customer( + format!( + "Start preparing EKS upgrade process {} cluster with id {}", + self.name(), + self.id() + ) + .as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), + )); + + let temp_dir = self.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let mut context = kubernetes::tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; + + // + // Upgrade master nodes + // + match 
&kubernetes_upgrade_status.required_upgrade_on { + Some(KubernetesNodesType::Masters) => { + self.send_to_customer( + format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), + )); + + // AWS requires the upgrade to be done in 2 steps (masters, then workers) + // use the current kubernetes masters' version for workers, in order to avoid migration in one step + context.insert( + "kubernetes_master_version", + format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), + ); + // use the current master version for workers, they will be updated later + context.insert( + "eks_workers_version", + format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(), + ); + + if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( + self.template_directory.as_str(), + temp_dir.as_str(), + context.clone(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + self.template_directory.to_string(), + temp_dir, + e, + )); + } + + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); + if let Err(e) = crate::template::copy_non_template_files( + common_bootstrap_charts.as_str(), + common_charts_temp_dir.as_str(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + common_bootstrap_charts, + common_charts_temp_dir, + e, + )); + } + + self.send_to_customer( + format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), + )); + + match 
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { + Ok(_) => { + self.send_to_customer( + format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes master nodes have been successfully upgraded.".to_string(), + ), + )); + } + Err(e) => { + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + } + } + Some(KubernetesNodesType::Workers) => { + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), + ), + )); + } + None => { + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), + ), + )); + return Ok(()); + } + } + + if let Err(e) = self.delete_crashlooping_pods( + None, + None, + Some(3), + self.cloud_provider().credentials_environment_variables(), + Stage::Infrastructure(InfrastructureStep::Upgrade), + ) { + self.logger().log(EngineEvent::Error(e.clone(), None)); + return Err(e); + } + + // + // Upgrade worker nodes + // + self.send_to_customer( + format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); + + // disable cluster autoscaler to avoid interfering with AWS upgrade procedure + context.insert("enable_cluster_autoscaler", &false); + context.insert( + "eks_workers_version", + format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), + ); + + if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( + 
self.template_directory.as_str(), + temp_dir.as_str(), + context.clone(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + self.template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); + if let Err(e) = + crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + common_bootstrap_charts, + common_charts_temp_dir, + e, + )); + } + + self.send_to_customer( + format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), + )); + + // Disable cluster autoscaler deployment + let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; + + match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { + Ok(_) => { + self.send_to_customer( + format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes workers nodes have been successfully upgraded.".to_string(), + ), + )); + } + Err(e) => { + // enable cluster autoscaler deployment + let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; + + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + } + + // 
enable cluster autoscaler deployment + self.set_cluster_autoscaler_replicas(event_details, 1) + } + + #[named] + fn on_upgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) + } + + #[named] + fn on_upgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::upgrade_error(self)) + } + + #[named] + fn on_downgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, kubernetes::downgrade) + } + + #[named] + fn on_downgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::downgrade_error(self)) + } + + #[named] + fn on_pause(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + 
send_progress_on_long_task(self, Action::Pause, || { + kubernetes::pause( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_pause_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || kubernetes::pause_error(self)) + } + + #[named] + fn on_delete(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + kubernetes::delete( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_delete_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || kubernetes::delete_error(self)) + } + + #[named] + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn deploy_environment_error(&self, environment: &Environment) -> 
Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::pause_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } + + #[named] + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::delete_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } +} + +impl Listen for EKS { + fn 
listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 345470d8..09df99cb 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -1,9 +1,6 @@ use core::fmt; -use std::borrow::Borrow; use std::env; use std::path::Path; -use std::str::FromStr; -use std::sync::Arc; use retry::delay::{Fibonacci, Fixed}; use retry::Error::Operation; @@ -12,42 +9,30 @@ use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites}; -use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType; use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create; use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; -use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo}; use crate::cloud_provider::kubernetes::{ - is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes, - KubernetesNodesType, KubernetesUpgradeStatus, ProviderOptions, + is_kubernetes_upgrade_required, uninstall_cert_manager, Kubernetes, ProviderOptions, }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::utilities::print_action; -use crate::cloud_provider::{kubernetes, CloudProvider}; +use crate::cloud_provider::CloudProvider; use crate::cmd; use crate::cmd::helm::{to_engine_error, Helm}; -use crate::cmd::kubectl::{ - kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, - kubectl_exec_scale_replicas, ScalingKind, -}; +use crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, 
kubectl_exec_get_events}; use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider; use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; -use crate::events::Stage::Infrastructure; -use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::io_models::{ - Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, - ToTerraformString, -}; -use crate::logger::Logger; +use crate::events::{EngineEvent, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; +use crate::io_models::{Context, Features, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString}; use crate::object_storage::s3::S3; -use crate::object_storage::ObjectStorage; use crate::string::terraform_list_format; -use ::function_name::named; +pub mod ec2; +pub mod eks; pub mod helm_charts; pub mod node; pub mod roles; @@ -122,1022 +107,6 @@ pub struct Options { impl ProviderOptions for Options {} -pub struct EKS { - context: Context, - id: String, - long_id: uuid::Uuid, - name: String, - version: String, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - s3: S3, - nodes_groups: Vec, - template_directory: String, - options: Options, - listeners: Listeners, - logger: Box, -} - -impl EKS { - pub fn new( - context: Context, - id: &str, - long_id: uuid::Uuid, - name: &str, - version: &str, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - options: Options, - nodes_groups: Vec, - logger: Box, - ) -> Result { - let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap-eks", context.lib_root_dir()); - - let 
aws_zones = aws_zones(zones, ®ion, &event_details)?; - - for node_group in &nodes_groups { - if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { - let err = - EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); - - logger.log(EngineEvent::Error(err.clone(), None)); - - return Err(err); - } - } - - let s3 = s3(&context, ®ion, &**cloud_provider); - - // copy listeners from CloudProvider - let listeners = cloud_provider.listeners().clone(); - Ok(EKS { - context, - id: id.to_string(), - long_id, - name: name.to_string(), - version: version.to_string(), - region, - zones: aws_zones, - cloud_provider, - dns_provider, - s3, - options, - nodes_groups, - template_directory, - logger, - listeners, - }) - } - - fn set_cluster_autoscaler_replicas( - &self, - event_details: EventDetails, - replicas_count: u32, - ) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), - )); - let (kubeconfig_path, _) = self.get_kubeconfig_file()?; - let selector = "cluster-autoscaler-aws-cluster-autoscaler"; - let namespace = "kube-system"; - let _ = kubectl_exec_scale_replicas( - kubeconfig_path, - self.cloud_provider().credentials_environment_variables(), - namespace, - ScalingKind::Deployment, - selector, - replicas_count, - ) - .map_err(|e| { - EngineError::new_k8s_scale_replicas( - event_details.clone(), - selector.to_string(), - namespace.to_string(), - replicas_count, - e, - ) - })?; - - Ok(()) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "kubernetes" - } -} - -impl Kubernetes for EKS { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::Eks - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn version(&self) -> &str { - 
self.version.as_str() - } - - fn region(&self) -> String { - self.region.to_aws_format() - } - - fn zone(&self) -> &str { - "" - } - - fn aws_zones(&self) -> Option> { - Some(self.zones.clone()) - } - - fn cloud_provider(&self) -> &dyn CloudProvider { - (*self.cloud_provider).borrow() - } - - fn dns_provider(&self) -> &dyn DnsProvider { - (*self.dns_provider).borrow() - } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } - - fn config_file_store(&self) -> &dyn ObjectStorage { - &self.s3 - } - - fn is_valid(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || { - create( - self, - self.long_id, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || create_error(self)) - } - - fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.send_to_customer( - format!( - "Start preparing EKS upgrade process {} cluster with id {}", - self.name(), - self.id() - ) - .as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - 
EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let mut context = tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; - - // - // Upgrade master nodes - // - match &kubernetes_upgrade_status.required_upgrade_on { - Some(KubernetesNodesType::Masters) => { - self.send_to_customer( - format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), - )); - - // AWS requires the upgrade to be done in 2 steps (masters, then workers) - // use the current kubernetes masters' version for workers, in order to avoid migration in one step - context.insert( - "kubernetes_master_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); - // use the current master version for workers, they will be updated later - context.insert( - "eks_workers_version", - format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(), - ); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context.clone(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - if let Err(e) = crate::template::copy_non_template_files( - common_bootstrap_charts.as_str(), - common_charts_temp_dir.as_str(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - 
common_bootstrap_charts, - common_charts_temp_dir, - e, - )); - } - - self.send_to_customer( - format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), - )); - - match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes master nodes have been successfully upgraded.".to_string(), - ), - )); - } - Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - } - } - Some(KubernetesNodesType::Workers) => { - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), - ), - )); - } - None => { - self.logger().log(EngineEvent::Info( - event_details, - EventMessage::new_from_safe( - "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), - ), - )); - return Ok(()); - } - } - - if let Err(e) = self.delete_crashlooping_pods( - None, - None, - Some(3), - self.cloud_provider().credentials_environment_variables(), - Stage::Infrastructure(InfrastructureStep::Upgrade), - ) { - self.logger().log(EngineEvent::Error(e.clone(), None)); - return Err(e); - } - - // - // Upgrade worker nodes - // - self.send_to_customer( - format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes 
cluster.".to_string()), - )); - - // disable cluster autoscaler to avoid interfering with AWS upgrade procedure - context.insert("enable_cluster_autoscaler", &false); - context.insert( - "eks_workers_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context.clone(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - if let Err(e) = - crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - common_bootstrap_charts, - common_charts_temp_dir, - e, - )); - } - - self.send_to_customer( - format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), - )); - - // Disable cluster autoscaler deployment - let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; - - match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - 
event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes workers nodes have been successfully upgraded.".to_string(), - ), - )); - } - Err(e) => { - // enable cluster autoscaler deployment - let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; - - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - } - - // enable cluster autoscaler deployment - self.set_cluster_autoscaler_replicas(event_details, 1) - } - - #[named] - fn on_upgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || self.upgrade()) - } - - #[named] - fn on_upgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) - } - - #[named] - fn on_downgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, downgrade) - } - - #[named] - fn on_downgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || 
downgrade_error(self)) - } - - #[named] - fn on_pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || { - pause( - self, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_pause_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || pause_error(self)) - } - - #[named] - fn on_delete(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || { - delete( - self, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_delete_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || delete_error(self)) - } - - #[named] - fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - 
self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::pause_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - #[named] - fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::delete_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for EKS { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -pub struct EC2 { - context: Context, - id: String, - long_id: uuid::Uuid, - name: String, - version: String, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - s3: S3, - template_directory: String, - options: Options, - listeners: Listeners, - logger: Box, -} - -impl EC2 { - pub fn new( - context: Context, - id: &str, - long_id: uuid::Uuid, - name: &str, - version: &str, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - options: Options, - logger: Box, - ) -> Result { - let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap-ec2", context.lib_root_dir()); - - let aws_zones = aws_zones(zones, ®ion, &event_details)?; - let s3 = s3(&context, ®ion, &**cloud_provider); - - // copy listeners from CloudProvider - let listeners = cloud_provider.listeners().clone(); - Ok(EC2 { - context, - id: id.to_string(), - long_id, - name: name.to_string(), - version: version.to_string(), - region, - zones: aws_zones, - cloud_provider, - dns_provider, - s3, - options, - template_directory, - logger, - listeners, - }) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "kubernetes" - } -} - -impl Kubernetes for EC2 { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::Ec2 - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn version(&self) -> &str { - self.version.as_str() - } - - fn region(&self) -> 
String { - self.region.to_aws_format() - } - - fn zone(&self) -> &str { - "" - } - - fn aws_zones(&self) -> Option> { - Some(self.zones.clone()) - } - - fn cloud_provider(&self) -> &dyn CloudProvider { - (*self.cloud_provider).borrow() - } - - fn dns_provider(&self) -> &dyn DnsProvider { - (*self.dns_provider).borrow() - } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } - - fn config_file_store(&self) -> &dyn ObjectStorage { - &self.s3 - } - - fn is_valid(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || { - create( - self, - self.long_id, - self.template_directory.as_str(), - &self.zones, - &vec![], - &self.options, - ) - }) - } - - #[named] - fn on_create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || create_error(self)) - } - - fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - // TODO - Ok(()) - } - - #[named] - fn on_upgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || self.upgrade()) - } - - #[named] - fn on_upgrade_error(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) - } - - #[named] - fn on_downgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, downgrade) - } - - #[named] - fn on_downgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) - } - - #[named] - fn on_pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || { - pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) - }) - } - - #[named] - fn on_pause_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || pause_error(self)) - } - - #[named] - fn on_delete(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || { - delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) - }) - } - - #[named] - fn on_delete_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || delete_error(self)) - } - - #[named] - fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - 
kubernetes::pause_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - #[named] - fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::delete_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for EC2 { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - fn event_details>( cloud_provider: &Box, kubernetes_id: S, @@ -1172,7 +141,7 @@ fn aws_zones( region.to_string(), zone, CommandError::new_from_safe_message(e.to_string()), - )) + )); } }; } @@ -1564,7 +533,6 @@ fn create( } }, Err(_) => kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) - }; // create AWS IAM roles @@ -1647,7 +615,7 @@ fn create( event_details, entry.to_string(), e, - )) + )); } } }; @@ -1905,10 +873,10 
@@ fn pause( kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. }) => { - return Err(error) + return Err(error); } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))); } } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 8a8e857e..57f77098 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -20,7 +20,9 @@ use crate::utilities::{ FuncTestsSecrets, }; use base64; -use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EC2, EKS}; +use qovery_engine::cloud_provider::aws::kubernetes::ec2::EC2; +use qovery_engine::cloud_provider::aws::kubernetes::eks::EKS; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; From 3a5fb696cadeab1285912ad132efd7de898bc7ff Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 23:56:43 +0200 Subject: [PATCH 057/122] wip: fix ec2 tests --- src/cloud_provider/aws/kubernetes/mod.rs | 4 ++++ test_utilities/src/aws.rs | 13 +++++++++++++ .../aws_kubernetes_ec2.rs} | 12 ++++++------ tests/aws/mod.rs | 1 + tests/edge/aws/mod.rs | 1 - tests/edge/mod.rs | 1 - tests/lib.rs | 1 - 7 files changed, 24 insertions(+), 9 deletions(-) rename tests/{edge/aws/edge_aws_kubernetes.rs => aws/aws_kubernetes_ec2.rs} (86%) delete mode 100644 tests/edge/aws/mod.rs delete mode 100644 tests/edge/mod.rs diff --git a/src/cloud_provider/aws/kubernetes/mod.rs 
b/src/cloud_provider/aws/kubernetes/mod.rs index 09df99cb..f24bf34b 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -81,8 +81,10 @@ pub struct Options { pub vpc_qovery_network_mode: VpcQoveryNetworkMode, pub vpc_cidr_block: String, pub eks_cidr_subnet: String, + pub ec2_cidr_subnet: String, pub vpc_custom_routing_table: Vec, pub eks_access_cidr_blocks: Vec, + pub ec2_access_cidr_blocks: Vec, pub rds_cidr_subnet: String, pub documentdb_cidr_subnet: String, pub elasticache_cidr_subnet: String, @@ -438,6 +440,8 @@ fn tera_context( context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); context.insert("eks_masters_version", &kubernetes.version()); context.insert("eks_workers_version", &kubernetes.version()); + context.insert("ec2_masters_version", &kubernetes.version()); + context.insert("ec2_workers_version", &kubernetes.version()); context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index c6c5c4b1..6c5d7ee2 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -32,6 +32,8 @@ pub const AWS_KUBERNETES_VERSION: &'static str = pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; +pub const K3S_KUBERNETES_MAJOR_VERSION: u8 = 1; +pub const K3S_KUBERNETES_MINOR_VERSION: u8 = 20; pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR { let secrets = FuncTestsSecrets::new(); @@ -208,9 +210,20 @@ impl Cluster for AWS { vpc_qovery_network_mode: VpcQoveryNetworkMode::WithoutNatGateways, vpc_cidr_block: "10.0.0.0/16".to_string(), eks_cidr_subnet: "20".to_string(), + ec2_cidr_subnet: "20".to_string(), vpc_custom_routing_table: vec![], eks_access_cidr_blocks: secrets .EKS_ACCESS_CIDR_BLOCKS + .as_ref() + 
.unwrap() + .replace("\"", "") + .replace("[", "") + .replace("]", "") + .split(",") + .map(|c| c.to_string()) + .collect(), + ec2_access_cidr_blocks: secrets + .EKS_ACCESS_CIDR_BLOCKS // FIXME ? use an EC2_ACCESS_CIDR_BLOCKS? .unwrap() .replace("\"", "") .replace("[", "") diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/aws/aws_kubernetes_ec2.rs similarity index 86% rename from tests/edge/aws/edge_aws_kubernetes.rs rename to tests/aws/aws_kubernetes_ec2.rs index a46c9368..074abe5f 100644 --- a/tests/edge/aws/edge_aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -1,6 +1,5 @@ extern crate test_utilities; -use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::kubernetes::Kind as KKind; @@ -10,12 +9,13 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithN use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; +use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; #[cfg(feature = "test-aws-infra")] -fn create_and_destroy_edge_aws_cluster( +fn create_and_destroy_aws_ec2_k3s_cluster( region: String, test_type: ClusterTestType, major_boot_version: u8, @@ -55,13 +55,13 @@ fn create_and_destroy_edge_aws_cluster( #[cfg(feature = "test-aws-infra")] #[named] #[test] -fn create_and_destroy_edge_aws_cluster_eu_west_3() { +fn create_and_destroy_aws_ec2_k3s_cluster_eu_west_3() { let region = "eu-west-3".to_string(); - create_and_destroy_eks_cluster( + create_and_destroy_aws_ec2_k3s_cluster( region, ClusterTestType::Classic, - K3S_MAJOR_VERSION, - K3S_MINOR_VERSION, + K3S_KUBERNETES_MAJOR_VERSION, + 
K3S_KUBERNETES_MINOR_VERSION, WithoutNatGateways, function_name!(), ); diff --git a/tests/aws/mod.rs b/tests/aws/mod.rs index ace8ed56..2ead3484 100644 --- a/tests/aws/mod.rs +++ b/tests/aws/mod.rs @@ -1,5 +1,6 @@ mod aws_databases; mod aws_environment; mod aws_kubernetes; +mod aws_kubernetes_ec2; mod aws_s3; mod aws_whole_enchilada; diff --git a/tests/edge/aws/mod.rs b/tests/edge/aws/mod.rs deleted file mode 100644 index 24609250..00000000 --- a/tests/edge/aws/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod edge_aws_kubernetes; diff --git a/tests/edge/mod.rs b/tests/edge/mod.rs deleted file mode 100644 index 827da9e3..00000000 --- a/tests/edge/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod aws; diff --git a/tests/lib.rs b/tests/lib.rs index 18c6bc2d..bbc13eb3 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -3,5 +3,4 @@ extern crate maplit; mod aws; mod digitalocean; -mod edge; mod scaleway; From 49bfcdc327383f42d9e897406887e4b9fb934330 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Sat, 23 Apr 2022 00:02:24 +0200 Subject: [PATCH 058/122] wip: fix ec2 tests --- src/cloud_provider/aws/kubernetes/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index f24bf34b..b55c48c2 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -286,8 +286,10 @@ fn tera_context( let vpc_cidr_block = options.vpc_cidr_block.clone(); let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", kubernetes.id()); let eks_cidr_subnet = options.eks_cidr_subnet.clone(); + let ec2_cidr_subnet = options.ec2_cidr_subnet.clone(); let eks_access_cidr_blocks = format_ips(&options.eks_access_cidr_blocks); + let ec2_access_cidr_blocks = format_ips(&options.ec2_access_cidr_blocks); let qovery_api_url = options.qovery_api_url.clone(); let rds_cidr_subnet = options.rds_cidr_subnet.clone(); @@ -427,6 +429,7 @@ fn tera_context( // AWS - EKS 
context.insert("aws_availability_zones", &aws_zones); context.insert("eks_cidr_subnet", &eks_cidr_subnet); + context.insert("ec2_cidr_subnet", &ec2_cidr_subnet); context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); @@ -444,6 +447,7 @@ fn tera_context( context.insert("ec2_workers_version", &kubernetes.version()); context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); + context.insert("ec2_access_cidr_blocks", &ec2_access_cidr_blocks); // AWS - RDS context.insert("rds_cidr_subnet", &rds_cidr_subnet); From 2446c7241ffa6b3308dff0f72daa7b2ddfd289ba Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 09:37:58 +0200 Subject: [PATCH 059/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index b55c48c2..6edefd76 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -110,7 +110,7 @@ pub struct Options { impl ProviderOptions for Options {} fn event_details>( - cloud_provider: &Box, + cloud_provider: &dyn CloudProvider, kubernetes_id: S, kubernetes_name: S, kubernetes_region: &AwsRegion, @@ -201,7 +201,7 @@ fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> Str fn tera_context( kubernetes: &dyn Kubernetes, - zones: &Vec, + zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result { @@ -493,7 +493,7 @@ fn create( kubernetes_long_id: uuid::Uuid, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = 
kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); @@ -755,7 +755,7 @@ fn pause( kubernetes: &dyn Kubernetes, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); @@ -941,7 +941,7 @@ fn delete( kubernetes: &dyn Kubernetes, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); From 2b713aa80b5abec1883c4c01044d6b88519876d9 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 11:03:42 +0200 Subject: [PATCH 060/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/ec2.rs | 2 +- src/cloud_provider/aws/kubernetes/eks.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2.rs b/src/cloud_provider/aws/kubernetes/ec2.rs index fbbbe2d6..62d0ca45 100644 --- a/src/cloud_provider/aws/kubernetes/ec2.rs +++ b/src/cloud_provider/aws/kubernetes/ec2.rs @@ -48,7 +48,7 @@ impl EC2 { options: Options, logger: Box, ) -> Result { - let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let event_details = kubernetes::event_details(&**cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws-ec2/bootstrap", context.lib_root_dir()); let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; diff --git a/src/cloud_provider/aws/kubernetes/eks.rs b/src/cloud_provider/aws/kubernetes/eks.rs index 004923db..e3a78fa3 100644 --- a/src/cloud_provider/aws/kubernetes/eks.rs +++ b/src/cloud_provider/aws/kubernetes/eks.rs @@ -58,7 +58,7 @@ impl EKS { nodes_groups: Vec, logger: Box, ) -> Result { - let event_details = 
kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let event_details = kubernetes::event_details(&**cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; From c6571629eada769bedfe74d153881651b3e36067 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 11:14:31 +0200 Subject: [PATCH 061/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/ec2.rs | 6 +++--- src/cloud_provider/aws/kubernetes/mod.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2.rs b/src/cloud_provider/aws/kubernetes/ec2.rs index 62d0ca45..defb2242 100644 --- a/src/cloud_provider/aws/kubernetes/ec2.rs +++ b/src/cloud_provider/aws/kubernetes/ec2.rs @@ -153,7 +153,7 @@ impl Kubernetes for EC2 { self.long_id, self.template_directory.as_str(), &self.zones, - &vec![], + &[], &self.options, ) }) @@ -246,7 +246,7 @@ impl Kubernetes for EC2 { self.logger(), ); send_progress_on_long_task(self, Action::Pause, || { - kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &[], &self.options) }) } @@ -276,7 +276,7 @@ impl Kubernetes for EC2 { self.logger(), ); send_progress_on_long_task(self, Action::Delete, || { - kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &[], &self.options) }) } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 6edefd76..54b7ab17 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -492,7 +492,7 @@ fn create( kubernetes: &dyn Kubernetes, kubernetes_long_id: uuid::Uuid, template_directory: &str, - aws_zones: &Vec, 
+ aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { @@ -754,7 +754,7 @@ fn downgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { fn pause( kubernetes: &dyn Kubernetes, template_directory: &str, - aws_zones: &Vec, + aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { @@ -940,7 +940,7 @@ fn pause_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { fn delete( kubernetes: &dyn Kubernetes, template_directory: &str, - aws_zones: &Vec, + aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { From 0a5a7eba1f119ff50407ba7ae9bf447c78bb5133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Wed, 27 Apr 2022 10:23:00 +0200 Subject: [PATCH 062/122] Add cluster-agent installation (#699) --- .../qovery/qovery-cluster-agent/.helmignore | 23 +++++ .../qovery/qovery-cluster-agent/Chart.yaml | 6 ++ .../templates/_helpers.tpl | 54 ++++++++++++ .../templates/clusterrole.yaml | 20 +++++ .../templates/clusterrolebinding.yaml | 16 ++++ .../templates/deployment.yaml | 67 +++++++++++++++ .../templates/secret.yaml | 10 +++ .../templates/serviceaccount.yaml | 12 +++ .../qovery/qovery-cluster-agent/values.yaml | 80 ++++++++++++++++++ .../aws/kubernetes/helm_charts.rs | 20 ++++- .../digitalocean/kubernetes/helm_charts.rs | 17 +++- src/cloud_provider/helm.rs | 84 +++++++++++++++++++ src/cloud_provider/qovery.rs | 2 + .../scaleway/kubernetes/helm_charts.rs | 19 ++++- 14 files changed, 421 insertions(+), 9 deletions(-) create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/.helmignore create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/Chart.yaml create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/_helpers.tpl create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrole.yaml 
create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrolebinding.yaml create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/deployment.yaml create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/secret.yaml create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/serviceaccount.yaml create mode 100644 lib/common/bootstrap/charts/qovery/qovery-cluster-agent/values.yaml diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/.helmignore b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/Chart.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/Chart.yaml new file mode 100644 index 00000000..4a417aac --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: qovery-cluster-agent +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: 0.1.0 \ No newline at end of file diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/_helpers.tpl b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/_helpers.tpl new file mode 100644 index 00000000..548ce06b --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/_helpers.tpl @@ -0,0 +1,54 @@ +{{/* +Expand the name of 
the chart. +*/}} +{{- define "qovery-cluster-agent.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "qovery-cluster-agent.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "qovery-cluster-agent.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "qovery-cluster-agent.labels" -}} +helm.sh/chart: {{ include "qovery-cluster-agent.chart" . }} +{{ include "qovery-cluster-agent.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "qovery-cluster-agent.selectorLabels" -}} +app.kubernetes.io/name: {{ include "qovery-cluster-agent.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "qovery-cluster-agent.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "qovery-cluster-agent.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrole.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrole.yaml new file mode 100644 index 00000000..333c44ba --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterRole }} +apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }} +kind: ClusterRole +metadata: + name: {{ include "qovery-cluster-agent.fullname" . }} + labels: + {{- include "qovery-cluster-agent.labels" . | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespace + - events + verbs: + - get + - list + - watch +{{- end }} diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrolebinding.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..25c55115 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterRole }} +apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }} +kind: ClusterRoleBinding +metadata: + name: {{ include "qovery-cluster-agent.fullname" . }} + labels: + {{- include "qovery-cluster-agent.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "qovery-cluster-agent.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "qovery-cluster-agent.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/deployment.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/deployment.yaml new file mode 100644 index 00000000..28ae6249 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/deployment.yaml @@ -0,0 +1,67 @@ +{{- $kubefullname := include "qovery-cluster-agent.fullname" . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "qovery-cluster-agent.fullname" . }} + labels: + {{- include "qovery-cluster-agent.labels" . | nindent 4 }} +spec: +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- include "qovery-cluster-agent.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "qovery-cluster-agent.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "qovery-cluster-agent.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + env: + {{ range $key, $value := .Values.environmentVariables -}} + - name: "{{ $key }}" + valueFrom: + secretKeyRef: + name: {{ $kubefullname }} + key: {{ $key }} + {{ end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/secret.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/secret.yaml new file mode 100644 index 00000000..eeabc555 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "qovery-cluster-agent.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "qovery-cluster-agent.labels" . 
| nindent 4 }} +type: Opaque +stringData: + {{- toYaml .Values.environmentVariables | nindent 2 }} \ No newline at end of file diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/serviceaccount.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/serviceaccount.yaml new file mode 100644 index 00000000..9d8b094f --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "qovery-cluster-agent.serviceAccountName" . }} + labels: + {{- include "qovery-cluster-agent.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/values.yaml b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/values.yaml new file mode 100644 index 00000000..d3c3e990 --- /dev/null +++ b/lib/common/bootstrap/charts/qovery/qovery-cluster-agent/values.yaml @@ -0,0 +1,80 @@ +# Default values for qovery-agent. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: qoveryrd/cluster-agent + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +namespace: "qovery" + +labels: + app: qovery-cluster-agent + +environmentVariables: {} + #CLOUD_PROVIDER: "" + #CLOUD_REGION: "" + #AGENT_ID: "" + #ES_HOST_URL: "" + #NATS_HOST_URL: "" + #RUST_LOG: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +rbac: + create: true + clusterRole: true + apiVersion: v1 \ No newline at end of file diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 9b8fc0cb..e4595d2e 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1,8 +1,8 @@ use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use crate::cloud_provider::helm::{ - get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, - ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, - PrometheusOperatorConfigChart, ShellAgentContext, + get_chart_for_cluster_agent, get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, + ChartPayload, ChartSetValue, ChartValuesGenerated, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, + HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext, }; use 
crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; use crate::cmd::kubectl::{kubectl_delete_crash_looping_pods, kubectl_exec_get_daemonset, kubectl_exec_with_output}; @@ -972,6 +972,17 @@ datasources: // }, // }; + let cluster_agent_context = ClusterAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; + let shell_context = ShellAgentContext { api_url: &chart_config_prerequisites.infra_options.qovery_api_url, api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, @@ -1189,7 +1200,8 @@ datasources: let mut level_7: Vec> = vec![ Box::new(cert_manager_config), - Box::new(qovery_agent), + Box::new(qovery_agent), // TODO: Migrate to the new cluster agent + Box::new(cluster_agent), Box::new(shell_agent), Box::new(qovery_engine), ]; diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 19b09381..f6db1401 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -1,7 +1,8 @@ use crate::cloud_provider::digitalocean::kubernetes::DoksOptions; use crate::cloud_provider::helm::{ - get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated, - CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, 
PrometheusOperatorConfigChart, ShellAgentContext, + get_chart_for_cluster_agent, get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, + ChartSetValue, ChartValuesGenerated, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, + HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext, }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; use crate::errors::CommandError; @@ -793,6 +794,17 @@ datasources: }, }; + let cluster_agent_context = ClusterAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; + let shell_context = ShellAgentContext { api_url: &chart_config_prerequisites.infra_options.qovery_api_url, api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, @@ -1038,6 +1050,7 @@ datasources: let mut level_6: Vec> = vec![ Box::new(cert_manager_config), Box::new(qovery_agent), + Box::new(cluster_agent), Box::new(shell_agent), Box::new(qovery_engine), Box::new(digital_mobius), diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index fe22b7e8..8e252bff 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -735,6 +735,90 @@ pub fn get_chart_for_shell_agent( Ok(shell_agent) } +pub struct ClusterAgentContext<'a> { + pub api_url: &'a str, + pub api_token: &'a str, + pub organization_long_id: &'a Uuid, + pub cluster_id: &'a str, + pub 
cluster_long_id: &'a Uuid, + pub cluster_token: &'a str, + pub grpc_url: &'a str, +} + +// This one is the new agent in rust +pub fn get_chart_for_cluster_agent( + context: ClusterAgentContext, + chart_path: impl Fn(&str) -> String, +) -> Result { + let shell_agent_version: QoveryShellAgent = get_qovery_app_version( + QoveryAppName::ClusterAgent, + context.api_token, + context.api_url, + context.cluster_id, + )?; + let cluster_agent = CommonChart { + chart_info: ChartInfo { + name: "cluster-agent".to_string(), + path: chart_path("common/charts/qovery/qovery-cluster-agent"), + namespace: HelmChartNamespaces::Qovery, + values: vec![ + ChartSetValue { + key: "image.tag".to_string(), + value: shell_agent_version.version, + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "environmentVariables.RUST_BACKTRACE".to_string(), + value: "full".to_string(), + }, + ChartSetValue { + key: "environmentVariables.RUST_LOG".to_string(), + value: "DEBUG".to_string(), + }, + ChartSetValue { + key: "environmentVariables.GRPC_SERVER".to_string(), + value: context.grpc_url.to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_TOKEN".to_string(), + value: context.cluster_token.to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_ID".to_string(), + value: context.cluster_long_id.to_string(), + }, + ChartSetValue { + key: "environmentVariables.ORGANIZATION_ID".to_string(), + value: context.organization_long_id.to_string(), + }, + // resources limits + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "100Mi".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "500Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + Ok(cluster_agent) 
+} + #[cfg(test)] mod tests { use crate::cloud_provider::helm::get_latest_successful_deployment; diff --git a/src/cloud_provider/qovery.rs b/src/cloud_provider/qovery.rs index 92943863..7148572d 100644 --- a/src/cloud_provider/qovery.rs +++ b/src/cloud_provider/qovery.rs @@ -34,6 +34,7 @@ pub enum QoveryAppName { Agent, Engine, ShellAgent, + ClusterAgent, } pub fn get_qovery_app_version( @@ -50,6 +51,7 @@ pub fn get_qovery_app_version( QoveryAppName::Agent => "agent", QoveryAppName::Engine => "engine", QoveryAppName::ShellAgent => "shellAgent", + QoveryAppName::ClusterAgent => "clusterAgent", }; let url = format!( diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 9bea4c13..75682e0a 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -1,6 +1,7 @@ use crate::cloud_provider::helm::{ - get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated, - CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext, + get_chart_for_cluster_agent, get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, + ChartSetValue, ChartValuesGenerated, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, + HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext, }; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine}; use crate::cloud_provider::scaleway::kubernetes::KapsuleOptions; @@ -666,6 +667,17 @@ datasources: }, }; + let cluster_agent_context = ClusterAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: 
&chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; + let shell_context = ShellAgentContext { api_url: &chart_config_prerequisites.infra_options.qovery_api_url, api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, @@ -869,7 +881,8 @@ datasources: let mut level_7: Vec> = vec![ Box::new(cert_manager_config), - Box::new(qovery_agent), + Box::new(cluster_agent), + Box::new(qovery_agent), // Old agent, this one should be removed/migrated Box::new(shell_agent), Box::new(qovery_engine), ]; From bd95f272c6bcf8e1b4ba032a4050af1923e71028 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 20 Apr 2022 18:14:58 +0200 Subject: [PATCH 063/122] feat: add edge aws struct --- lib/edge/aws/backend.j2.tf | 10 + lib/edge/aws/documentdb.tf | 81 +++++ lib/edge/aws/eks-vpc-common.j2.tf | 42 +++ .../aws/eks-vpc-without-nat-gateways.j2.tf | 75 ++++ lib/edge/aws/elasticcache.tf | 80 +++++ lib/edge/aws/elasticsearch.tf | 79 +++++ lib/edge/aws/qovery-vault.j2.tf | 29 ++ lib/edge/aws/rds.tf | 118 +++++++ lib/edge/aws/s3-qovery-buckets.tf | 44 +++ lib/edge/aws/tf-default-vars.j2.tf | 319 ++++++++++++++++++ lib/edge/aws/tf-providers-aws.j2.tf | 60 ++++ src/cloud_provider/io.rs | 4 +- src/cloud_provider/mod.rs | 7 + src/io_models.rs | 207 +++++++++++- test_utilities/src/common.rs | 6 +- test_utilities/src/edge_aws_rs.rs | 1 + test_utilities/src/lib.rs | 1 + tests/edge/aws/edge_aws_kubernetes.rs | 65 ++++ tests/edge/aws/mod.rs | 1 + tests/edge/mod.rs | 1 + tests/lib.rs | 1 + 21 files changed, 1227 insertions(+), 4 deletions(-) create mode 100644 lib/edge/aws/backend.j2.tf create mode 100644 lib/edge/aws/documentdb.tf create mode 100644 
lib/edge/aws/eks-vpc-common.j2.tf create mode 100644 lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf create mode 100644 lib/edge/aws/elasticcache.tf create mode 100644 lib/edge/aws/elasticsearch.tf create mode 100644 lib/edge/aws/qovery-vault.j2.tf create mode 100644 lib/edge/aws/rds.tf create mode 100644 lib/edge/aws/s3-qovery-buckets.tf create mode 100644 lib/edge/aws/tf-default-vars.j2.tf create mode 100644 lib/edge/aws/tf-providers-aws.j2.tf create mode 100644 test_utilities/src/edge_aws_rs.rs create mode 100644 tests/edge/aws/edge_aws_kubernetes.rs create mode 100644 tests/edge/aws/mod.rs create mode 100644 tests/edge/mod.rs diff --git a/lib/edge/aws/backend.j2.tf b/lib/edge/aws/backend.j2.tf new file mode 100644 index 00000000..a1418800 --- /dev/null +++ b/lib/edge/aws/backend.j2.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + access_key = "{{ aws_access_key_tfstates_account }}" + secret_key = "{{ aws_secret_key_tfstates_account }}" + bucket = "{{ aws_terraform_backend_bucket }}" + key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" + dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}" + region = "{{ aws_region_tfstates_account }}" + } +} diff --git a/lib/edge/aws/documentdb.tf b/lib/edge/aws/documentdb.tf new file mode 100644 index 00000000..ea04fec0 --- /dev/null +++ b/lib/edge/aws/documentdb.tf @@ -0,0 +1,81 @@ +locals { + tags_documentdb = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "DocumentDB" + } + ) +} + +# Network + +resource "aws_subnet" "documentdb_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.documentdb_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.documentdb_subnets_zone_b[count.index] + vpc_id = 
aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.documentdb_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_route_table_association" "documentdb_cluster_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_docdb_subnet_group" "documentdb" { + description = "DocumentDB linked to ${var.kubernetes_cluster_id}" + name = "documentdb-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) + + tags = local.tags_documentdb +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "documentdb_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow DocumentDB incoming access from anywhere" + from_port = 27017 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 27017 + type = "ingress" +} diff --git a/lib/edge/aws/eks-vpc-common.j2.tf b/lib/edge/aws/eks-vpc-common.j2.tf new file mode 100644 index 00000000..63b91880 --- /dev/null +++ b/lib/edge/aws/eks-vpc-common.j2.tf @@ -0,0 +1,42 @@ +data "aws_availability_zones" "available" {} + +locals { + tags_eks_vpc = merge( + 
local.tags_common, + { + Name = "qovery-eks-workers", + "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", + "kubernetes.io/role/elb" = 1, + {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} + } + ) + + tags_eks_vpc_public = merge( + local.tags_eks_vpc, + { + "Public" = "true" + } + ) + + tags_eks_vpc_private = merge( + local.tags_eks, + { + "Public" = "false" + } + ) +} + +# VPC +resource "aws_vpc" "eks" { + cidr_block = var.vpc_cidr_block + enable_dns_hostnames = true + + tags = local.tags_eks_vpc +} + +# Internet gateway +resource "aws_internet_gateway" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + tags = local.tags_eks_vpc +} \ No newline at end of file diff --git a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf new file mode 100644 index 00000000..d0174308 --- /dev/null +++ b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf @@ -0,0 +1,75 @@ +{% if vpc_qovery_network_mode == "WithoutNatGateways" %} +# Public subnets +resource "aws_subnet" "eks_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.eks_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.eks_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.eks_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_route_table" "eks_cluster" { + vpc_id = 
aws_vpc.eks.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.eks_cluster.id + } + + // todo(pmavro): add tests for it when it will be available in the SDK + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_eks_vpc +} + +resource "aws_route_table_association" "eks_cluster_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + subnet_id = aws_subnet.eks_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + subnet_id = aws_subnet.eks_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + subnet_id = aws_subnet.eks_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} +{% endif %} \ No newline at end of file diff --git a/lib/edge/aws/elasticcache.tf b/lib/edge/aws/elasticcache.tf new file mode 100644 index 00000000..44073c63 --- /dev/null +++ b/lib/edge/aws/elasticcache.tf @@ -0,0 +1,80 @@ +locals { + tags_elasticache = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "Elasticache" + } + ) +} + +# Network + +resource "aws_subnet" "elasticache_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticache_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticache_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_c" { + count = 
length(var.elasticache_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticache_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_route_table_association" "elasticache_cluster_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_elasticache_subnet_group" "elasticache" { + description = "Elasticache linked to ${var.kubernetes_cluster_id}" + # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly + name = "elasticache-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "elasticache_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow Redis incoming access from anywhere" + from_port = 6379 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 6379 + type = "ingress" +} diff --git a/lib/edge/aws/elasticsearch.tf b/lib/edge/aws/elasticsearch.tf new file mode 100644 index 00000000..f5e873dd --- /dev/null +++ b/lib/edge/aws/elasticsearch.tf @@ -0,0 +1,79 @@ +locals { + tags_elasticsearch = merge( + local.tags_eks, + { + "Service" = "Elasticsearch" + } + ) +} + +# Network + +resource 
"aws_subnet" "elasticsearch_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticsearch_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticsearch_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticsearch_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_security_group" "elasticsearch" { + name = "elasticsearch-${var.kubernetes_cluster_id}" + description = "Elasticsearch security group" + vpc_id = aws_vpc.eks.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + + cidr_blocks = [ + aws_vpc.eks.cidr_block + ] + } + + tags = local.tags_elasticsearch +} diff --git a/lib/edge/aws/qovery-vault.j2.tf b/lib/edge/aws/qovery-vault.j2.tf new file mode 
100644 index 00000000..b12afa38 --- /dev/null +++ b/lib/edge/aws/qovery-vault.j2.tf @@ -0,0 +1,29 @@ +locals { + kubeconfig_base64 = base64encode(local.kubeconfig) +} +// do not run for tests clusters to avoid uncleaned info. +// do not try to use count into resource, it will fails trying to connect to vault +{% if vault_auth_method != "none" and not test_cluster %} +resource "vault_generic_secret" "cluster-access" { + path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" + + data_json = < for Kind { @@ -15,6 +16,7 @@ impl From for Kind { KindModel::Aws => Kind::Aws, KindModel::Do => Kind::Do, KindModel::Scw => Kind::Scw, + KindModel::Edge(Edge::Aws) => Kind::Edge(Edge::Aws), } } } diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index 650b1d09..ba20716c 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -52,6 +52,12 @@ pub enum Kind { Aws, Do, Scw, + Edge(Edge), +} + +#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Edge { + Aws, } impl Display for Kind { @@ -60,6 +66,7 @@ impl Display for Kind { Kind::Aws => "AWS", Kind::Do => "Digital Ocean", Kind::Scw => "Scaleway", + Kind::Edge(Edge::Aws) => "Edge AWS", }) } } diff --git a/src/io_models.rs b/src/io_models.rs index 5c57e8cd..ac79b514 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -16,8 +16,8 @@ use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::service::{DatabaseOptions, RouterService}; -use crate::cloud_provider::Kind as CPKind; use crate::cloud_provider::{service, CloudProvider}; +use crate::cloud_provider::{Edge, Kind as CPKind, Kind}; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; @@ -295,6 +295,25 @@ impl Application { listeners, logger.clone(), )?)), + Kind::Edge(Edge::Aws) => 
Ok(Box::new(models::application::Application::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + self.ports.clone(), + self.total_cpus.clone(), + self.cpu_burst.clone(), + self.total_ram_in_mib, + self.min_instances, + self.max_instances, + build, + self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), + environment_variables, + self.advance_settings.clone(), + AwsAppExtraSettings {}, + listeners, + logger.clone(), + )?)), } } @@ -586,6 +605,22 @@ impl Router { )?); Ok(router) } + Kind::Edge(Edge::Aws) => { + let router = Box::new(models::router::Router::::new( + context.clone(), + self.id.as_str(), + self.name.as_str(), + self.action.to_service_action(), + self.default_domain.as_str(), + custom_domains, + routes, + self.sticky_sessions_enabled, + AwsRouterExtraSettings {}, + listeners, + logger, + )?); + Ok(router) + } } } } @@ -1064,6 +1099,176 @@ impl Database { service::DatabaseType::MongoDB, SCW::full_name().to_string(), )), + + (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + 
logger, + )?; + + Ok(Box::new(db)) + } + + (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + 
database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } + (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { + let db = models::database::Database::::new( + context.clone(), + self.id.as_str(), + self.action.to_service_action(), + self.name.as_str(), + version, + self.fqdn.as_str(), + self.fqdn_id.as_str(), + self.total_cpus.clone(), + self.total_ram_in_mib, + self.database_instance_type.as_str(), + database_options.publicly_accessible, + database_options.port, + database_options, + listeners, + logger, + )?; + + Ok(Box::new(db)) + } } } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index c000f998..7c2b440f 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -13,6 +13,7 @@ use qovery_engine::io_models::{ use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION}; +use crate::edge_aws_rs::AWS_K3S_VERSION; use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use crate::utilities::{ db_disk_type, db_infos, db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init, @@ -29,7 +30,7 @@ use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; -use 
qovery_engine::cloud_provider::{CloudProvider, Kind}; +use qovery_engine::cloud_provider::{CloudProvider, Edge, Kind}; use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; @@ -1135,10 +1136,11 @@ pub fn test_db( Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + Kind::Edge(Edge::Aws) => (AWS_TEST_REGION.to_string(), AWS_K3S_VERSION.to_string()), }; let engine_config = match provider_kind { - Kind::Aws => AWS::docker_cr_engine( + Kind::Aws | Kind::Edge(Edge::Aws) => AWS::docker_cr_engine( &context, logger.clone(), localisation.as_str(), diff --git a/test_utilities/src/edge_aws_rs.rs b/test_utilities/src/edge_aws_rs.rs new file mode 100644 index 00000000..1a50ae40 --- /dev/null +++ b/test_utilities/src/edge_aws_rs.rs @@ -0,0 +1 @@ +pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; diff --git a/test_utilities/src/lib.rs b/test_utilities/src/lib.rs index 4092f39e..14b7316b 100644 --- a/test_utilities/src/lib.rs +++ b/test_utilities/src/lib.rs @@ -7,5 +7,6 @@ pub mod aws; pub mod cloudflare; pub mod common; pub mod digitalocean; +pub mod edge_aws_rs; pub mod scaleway; pub mod utilities; diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/edge/aws/edge_aws_kubernetes.rs new file mode 100644 index 00000000..fbfdacb4 --- /dev/null +++ b/tests/edge/aws/edge_aws_kubernetes.rs @@ -0,0 +1,65 @@ +extern crate test_utilities; + +use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +use ::function_name::named; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use 
qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; +use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::Kind; +use std::str::FromStr; +use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + +pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; + +#[cfg(feature = "test-aws-infra")] +fn create_and_destroy_edge_aws_cluster( + region: String, + test_type: ClusterTestType, + major_boot_version: u8, + minor_boot_version: u8, + vpc_network_mode: VpcQoveryNetworkMode, + test_name: &str, +) { + engine_run_test(|| { + let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let zones = region.get_zones(); + cluster_test( + test_name, + Kind::Aws, + context( + generate_id().as_str(), + generate_cluster_id(region.to_string().as_str()).as_str(), + ), + logger(), + region.to_aws_format().as_str(), + Some(zones), + test_type, + major_boot_version, + minor_boot_version, + &ClusterDomain::Default, + Option::from(vpc_network_mode), + None, + ) + }) +} + +/* + TESTS NOTES: + It is useful to keep 2 clusters deployment tests to run in // to validate there is no name collision (overlaping) +*/ + +#[cfg(feature = "test-aws-infra")] +#[named] +#[test] +fn create_and_destroy_edge_aws_cluster_eu_west_3() { + let region = "eu-west-3".to_string(); + create_and_destroy_eks_cluster( + region, + ClusterTestType::Classic, + K3S_MAJOR_VERSION, + K3S_MINOR_VERSION, + WithoutNatGateways, + function_name!(), + ); +} diff --git a/tests/edge/aws/mod.rs b/tests/edge/aws/mod.rs new file mode 100644 index 00000000..24609250 --- /dev/null +++ b/tests/edge/aws/mod.rs @@ -0,0 +1 @@ +mod edge_aws_kubernetes; diff --git a/tests/edge/mod.rs b/tests/edge/mod.rs new file mode 100644 index 00000000..827da9e3 --- /dev/null +++ b/tests/edge/mod.rs @@ -0,0 +1 @@ +mod aws; diff --git a/tests/lib.rs b/tests/lib.rs index bbc13eb3..18c6bc2d 100644 --- 
a/tests/lib.rs +++ b/tests/lib.rs @@ -3,4 +3,5 @@ extern crate maplit; mod aws; mod digitalocean; +mod edge; mod scaleway; From 143660cb468f7419d3b57c8bcaaa980e70e68760 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Wed, 20 Apr 2022 18:44:45 +0200 Subject: [PATCH 064/122] fix: make ToTeraContext public trait --- src/models/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/models/types.rs b/src/models/types.rs index d63bbeb1..3ebbb243 100644 --- a/src/models/types.rs +++ b/src/models/types.rs @@ -27,7 +27,7 @@ pub trait CloudProvider { fn lib_directory_name() -> &'static str; } -pub(crate) trait ToTeraContext { +pub trait ToTeraContext { fn to_tera_context(&self, target: &DeploymentTarget) -> Result; } From 2e0cd0fc90a640e42d03b7f4aca5a9f53e8f1bf9 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Wed, 20 Apr 2022 22:10:28 +0200 Subject: [PATCH 065/122] wip: add ec2 Kubernetes.kind --- .../aws/kubernetes/helm_charts.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 161 +++++++++++++- src/cloud_provider/io.rs | 4 +- src/cloud_provider/kubernetes.rs | 8 +- src/cloud_provider/mod.rs | 7 - src/io_models.rs | 207 +----------------- test_utilities/src/aws.rs | 8 +- 7 files changed, 172 insertions(+), 227 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index e4595d2e..7c4614a3 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; +use crate::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; use crate::cloud_provider::helm::{ get_chart_for_cluster_agent, get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, @@ -53,7 +53,7 @@ pub struct 
ChartsConfigPrerequisites { pub cloudflare_api_token: String, pub disable_pleco: bool, // qovery options form json input - pub infra_options: Options, + pub infra_options: EksOptions, } pub fn aws_helm_charts( diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 89816f5b..c6ee2eb9 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -73,7 +73,7 @@ impl fmt::Display for VpcQoveryNetworkMode { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Options { +pub struct EksOptions { // AWS related pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, @@ -117,7 +117,7 @@ pub struct Options { pub tls_email_report: String, } -impl ProviderOptions for Options {} +impl ProviderOptions for EksOptions {} pub struct EKS { context: Context, @@ -132,7 +132,7 @@ pub struct EKS { s3: S3, nodes_groups: Vec, template_directory: String, - options: Options, + options: EksOptions, listeners: Listeners, logger: Box, } @@ -148,7 +148,7 @@ impl EKS { zones: Vec, cloud_provider: Arc>, dns_provider: Arc>, - options: Options, + options: EksOptions, nodes_groups: Vec, logger: Box, ) -> Result { @@ -1791,3 +1791,156 @@ impl Listen for EKS { self.listeners.push(listener); } } + +pub struct Ec2Options {} + +impl ProviderOptions for Ec2Options {} + +pub struct EC2 { + context: Context, + id: String, + long_id: uuid::Uuid, + name: String, + version: String, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + template_directory: String, + options: Ec2Options, + listeners: Listeners, + logger: Box, +} + +impl Kubernetes for EC2 { + fn context(&self) -> &Context { + todo!() + } + + fn kind(&self) -> Kind { + todo!() + } + + fn id(&self) -> &str { + todo!() + } + + fn name(&self) -> &str { + todo!() + } + + fn version(&self) -> &str { + todo!() + } + + fn region(&self) -> String { + todo!() + } + + fn zone(&self) -> &str { + todo!() + } + 
+ fn aws_zones(&self) -> Option> { + todo!() + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + todo!() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + todo!() + } + + fn logger(&self) -> &dyn Logger { + todo!() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + todo!() + } + + fn is_valid(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_create(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_create_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + todo!() + } + + fn on_upgrade(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_upgrade_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_downgrade(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_downgrade_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_pause(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_pause_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_delete(&self) -> Result<(), EngineError> { + todo!() + } + + fn on_delete_error(&self) -> Result<(), EngineError> { + todo!() + } + + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn pause_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } + + fn delete_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + todo!() + } +} + +impl Listen for EC2 { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + 
self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/io.rs b/src/cloud_provider/io.rs index ed74ed8a..dc8a5810 100644 --- a/src/cloud_provider/io.rs +++ b/src/cloud_provider/io.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::{Edge, Kind as KindModel}; +use crate::cloud_provider::Kind as KindModel; use serde_derive::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] @@ -7,7 +7,6 @@ pub enum Kind { Aws, Do, Scw, - Edge(Edge), } impl From for Kind { @@ -16,7 +15,6 @@ impl From for Kind { KindModel::Aws => Kind::Aws, KindModel::Do => Kind::Do, KindModel::Scw => Kind::Scw, - KindModel::Edge(Edge::Aws) => Kind::Edge(Edge::Aws), } } } diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 49174f8b..4ff9cf5c 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -364,6 +364,7 @@ pub trait KubernetesNode { #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum Kind { Eks, + Ec2, Doks, ScwKapsule, } @@ -372,6 +373,7 @@ impl Display for Kind { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Kind::Eks => "EKS", + Kind::Ec2 => "EC2", Kind::Doks => "DOKS", Kind::ScwKapsule => "ScwKapsule", }) @@ -404,6 +406,10 @@ pub fn deploy_environment( kubernetes, environment, }, + Kind::Ec2 => DeploymentTarget { + kubernetes, + environment, + }, Kind::Doks => DeploymentTarget { kubernetes, environment, @@ -1790,7 +1796,7 @@ mod tests { "systemUUID": "EC2E8B4C-92F9-213B-09B5-C0CD11A7EEB7" } } - } + } ], "kind": "List", "metadata": { diff --git a/src/cloud_provider/mod.rs b/src/cloud_provider/mod.rs index ba20716c..650b1d09 100644 --- a/src/cloud_provider/mod.rs +++ b/src/cloud_provider/mod.rs @@ -52,12 +52,6 @@ pub enum Kind { Aws, Do, Scw, - Edge(Edge), -} - -#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Edge { - Aws, } impl Display for Kind { @@ -66,7 +60,6 @@ impl Display for Kind { Kind::Aws => "AWS", Kind::Do => "Digital Ocean", 
Kind::Scw => "Scaleway", - Kind::Edge(Edge::Aws) => "Edge AWS", }) } } diff --git a/src/io_models.rs b/src/io_models.rs index ac79b514..5c57e8cd 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -16,8 +16,8 @@ use url::Url; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::service::{DatabaseOptions, RouterService}; +use crate::cloud_provider::Kind as CPKind; use crate::cloud_provider::{service, CloudProvider}; -use crate::cloud_provider::{Edge, Kind as CPKind, Kind}; use crate::cmd::docker::Docker; use crate::container_registry::ContainerRegistryInfo; use crate::logger::Logger; @@ -295,25 +295,6 @@ impl Application { listeners, logger.clone(), )?)), - Kind::Edge(Edge::Aws) => Ok(Box::new(models::application::Application::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - self.ports.clone(), - self.total_cpus.clone(), - self.cpu_burst.clone(), - self.total_ram_in_mib, - self.min_instances, - self.max_instances, - build, - self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), - environment_variables, - self.advance_settings.clone(), - AwsAppExtraSettings {}, - listeners, - logger.clone(), - )?)), } } @@ -605,22 +586,6 @@ impl Router { )?); Ok(router) } - Kind::Edge(Edge::Aws) => { - let router = Box::new(models::router::Router::::new( - context.clone(), - self.id.as_str(), - self.name.as_str(), - self.action.to_service_action(), - self.default_domain.as_str(), - custom_domains, - routes, - self.sticky_sessions_enabled, - AwsRouterExtraSettings {}, - listeners, - logger, - )?); - Ok(router) - } } } } @@ -1099,176 +1064,6 @@ impl Database { service::DatabaseType::MongoDB, SCW::full_name().to_string(), )), - - (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - 
self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - - (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::MANAGED) => { - let db = 
models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Redis, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } - (CPKind::Edge(Edge::Aws), DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { - let db = models::database::Database::::new( - context.clone(), - self.id.as_str(), - self.action.to_service_action(), - self.name.as_str(), - version, - self.fqdn.as_str(), - self.fqdn_id.as_str(), - self.total_cpus.clone(), - self.total_ram_in_mib, - self.database_instance_type.as_str(), - database_options.publicly_accessible, - database_options.port, - database_options, - listeners, - logger, - )?; - - Ok(Box::new(db)) - } } } } diff --git a/test_utilities/src/aws.rs 
b/test_utilities/src/aws.rs index 36fb944c..5921dd71 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -2,7 +2,7 @@ extern crate serde; extern crate serde_derive; use const_format::formatcp; -use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; +use qovery_engine::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::models::NodeGroups; @@ -65,7 +65,7 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> None, ) } -impl Cluster for AWS { +impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, @@ -147,8 +147,8 @@ impl Cluster for AWS { ] } - fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { - Options { + fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> EksOptions { + EksOptions { eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From da2e465320129f1b7096e7f8e88097bf7cf1566d Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 11:49:23 +0200 Subject: [PATCH 066/122] wip: add EC2.new(..) 
with inner properties --- .../aws/kubernetes/helm_charts.rs | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 149 ++++++++++++------ test_utilities/src/aws.rs | 8 +- 3 files changed, 111 insertions(+), 50 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 7c4614a3..e4595d2e 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -1,4 +1,4 @@ -use crate::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; +use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use crate::cloud_provider::helm::{ get_chart_for_cluster_agent, get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, @@ -53,7 +53,7 @@ pub struct ChartsConfigPrerequisites { pub cloudflare_api_token: String, pub disable_pleco: bool, // qovery options form json input - pub infra_options: EksOptions, + pub infra_options: Options, } pub fn aws_helm_charts( diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index c6ee2eb9..d08b2272 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -73,8 +73,9 @@ impl fmt::Display for VpcQoveryNetworkMode { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EksOptions { +pub struct Options { // AWS related + // TODO add ec2_zone_x_subnet_blocks pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, pub eks_zone_c_subnet_blocks: Vec, @@ -117,7 +118,7 @@ pub struct EksOptions { pub tls_email_report: String, } -impl ProviderOptions for EksOptions {} +impl ProviderOptions for Options {} pub struct EKS { context: Context, @@ -132,7 +133,7 @@ pub struct EKS { s3: S3, nodes_groups: Vec, template_directory: String, - options: EksOptions, + options: 
Options, listeners: Listeners, logger: Box, } @@ -148,36 +149,14 @@ impl EKS { zones: Vec, cloud_provider: Arc>, dns_provider: Arc>, - options: EksOptions, + options: Options, nodes_groups: Vec, logger: Box, ) -> Result { - let event_details = EventDetails::new( - Some(cloud_provider.kind()), - QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), - QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), - QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), - Some(region.to_string()), - Stage::Infrastructure(InfrastructureStep::LoadConfiguration), - Transmitter::Kubernetes(id.to_string(), name.to_string()), - ); - + let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones: Vec = Vec::with_capacity(3); - for zone in zones { - match AwsZones::from_string(zone.to_string()) { - Ok(x) => aws_zones.push(x), - Err(e) => { - return Err(EngineError::new_unsupported_zone( - event_details, - region.to_string(), - zone, - CommandError::new_from_safe_message(e.to_string()), - )) - } - }; - } + let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; for node_group in &nodes_groups { if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { @@ -190,17 +169,7 @@ impl EKS { } } - // TODO export this - let s3 = S3::new( - context.clone(), - "s3-temp-id".to_string(), - "default-s3".to_string(), - cloud_provider.access_key_id(), - cloud_provider.secret_access_key(), - region.clone(), - true, - context.resource_expiration_in_seconds(), - ); + let s3 = s3(&context, ®ion, cloud_provider.as_ref()); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -1792,10 +1761,6 @@ impl Listen for EKS { } } -pub struct Ec2Options {} - -impl ProviderOptions for Ec2Options {} - pub struct EC2 { context: Context, id: String, @@ -1808,11 +1773,52 @@ pub struct EC2 { 
dns_provider: Arc>, s3: S3, template_directory: String, - options: Ec2Options, + options: Options, listeners: Listeners, logger: Box, } +impl EC2 { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + logger: Box, + ) -> Result { + let event_details = event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + + let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EC2 { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + template_directory, + logger, + listeners, + }) + } +} + impl Kubernetes for EC2 { fn context(&self) -> &Context { todo!() @@ -1944,3 +1950,58 @@ impl Listen for EC2 { self.listeners.push(listener); } } + +fn event_details>( + cloud_provider: &Box, + kubernetes_id: S, + kubernetes_name: S, + kubernetes_region: &AwsRegion, + context: &Context, +) -> EventDetails { + EventDetails::new( + Some(cloud_provider.kind()), + QoveryIdentifier::new_from_long_id(context.organization_id().to_string()), + QoveryIdentifier::new_from_long_id(context.cluster_id().to_string()), + QoveryIdentifier::new_from_long_id(context.execution_id().to_string()), + Some(kubernetes_region.to_string()), + Stage::Infrastructure(InfrastructureStep::LoadConfiguration), + Transmitter::Kubernetes(kubernetes_id.into(), kubernetes_name.into()), + ) +} + +fn aws_zones( + zones: Vec, + region: &AwsRegion, + event_details: &EventDetails, +) -> Result, EngineError> { + let mut aws_zones = vec![]; + + for zone in zones { + match AwsZones::from_string(zone.to_string()) { + 
Ok(x) => aws_zones.push(x), + Err(e) => { + return Err(EngineError::new_unsupported_zone( + event_details.clone(), + region.to_string(), + zone, + CommandError::new_from_safe_message(e.to_string()), + )) + } + }; + } + + Ok(aws_zones) +} + +fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box) -> S3 { + S3::new( + context.clone(), + "s3-temp-id".to_string(), + "default-s3".to_string(), + cloud_provider.access_key_id(), + cloud_provider.secret_access_key(), + region.clone(), + true, + context.resource_expiration_in_seconds(), + ) +} diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 5921dd71..36fb944c 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -2,7 +2,7 @@ extern crate serde; extern crate serde_derive; use const_format::formatcp; -use qovery_engine::cloud_provider::aws::kubernetes::{EksOptions, VpcQoveryNetworkMode}; +use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::models::NodeGroups; @@ -65,7 +65,7 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> None, ) } -impl Cluster for AWS { +impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, @@ -147,8 +147,8 @@ impl Cluster for AWS { ] } - fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> EksOptions { - EksOptions { + fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { + Options { eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From 6e7c8b9e606dd43e5cf3a6c616abecaeab22bd81 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:05:38 +0200 
Subject: [PATCH 067/122] wip: prepare AWS EC2 Kubernetes provider --- src/cloud_provider/aws/kubernetes/mod.rs | 2397 ++++++++++++---------- test_utilities/src/aws.rs | 9 +- test_utilities/src/common.rs | 73 +- test_utilities/src/digitalocean.rs | 5 + test_utilities/src/scaleway.rs | 32 +- tests/aws/aws_kubernetes.rs | 13 +- tests/aws/aws_whole_enchilada.rs | 2 + tests/edge/aws/edge_aws_kubernetes.rs | 3 + tests/scaleway/scw_kubernetes.rs | 3 + tests/scaleway/scw_whole_enchilada.rs | 3 + 10 files changed, 1437 insertions(+), 1103 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index d08b2272..15eff3e6 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -156,7 +156,7 @@ impl EKS { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let aws_zones = aws_zones(zones, ®ion, &event_details)?; for node_group in &nodes_groups { if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { @@ -192,51 +192,6 @@ impl EKS { }) } - fn get_engine_location(&self) -> EngineLocation { - self.options.qovery_engine_location.clone() - } - - fn kubeconfig_bucket_name(&self) -> String { - format!("qovery-kubeconfigs-{}", self.id()) - } - - fn managed_dns_resolvers_terraform_format(&self) -> String { - let managed_dns_resolvers: Vec = self - .dns_provider - .resolvers() - .iter() - .map(|x| format!("{}", x.clone())) - .collect(); - - terraform_list_format(managed_dns_resolvers) - } - - fn lets_encrypt_url(&self) -> String { - match &self.context.is_test_cluster() { - true => "https://acme-staging-v02.api.letsencrypt.org/directory", - false => "https://acme-v02.api.letsencrypt.org/directory", - } - .to_string() - } - - /// divide by 2 the total number of subnet to get the exact same number as 
private and public - fn check_odd_subnets( - &self, - event_details: EventDetails, - zone_name: &str, - subnet_block: &[String], - ) -> Result { - if subnet_block.len() % 2 == 1 { - return Err(EngineError::new_subnets_count_is_not_even( - event_details, - zone_name.to_string(), - subnet_block.len(), - )); - } - - Ok((subnet_block.len() / 2) as usize) - } - fn set_cluster_autoscaler_replicas( &self, event_details: EventDetails, @@ -270,983 +225,6 @@ impl EKS { Ok(()) } - fn tera_context(&self) -> Result { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); - let mut context = TeraContext::new(); - - let format_ips = - |ips: &Vec| -> Vec { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::>() }; - let format_zones = |zones: &Vec| -> Vec { - zones - .iter() - .map(|zone| zone.to_terraform_format_string()) - .collect::>() - }; - - let aws_zones = format_zones(&self.zones); - - let mut eks_zone_a_subnet_blocks_private = format_ips(&self.options.eks_zone_a_subnet_blocks); - let mut eks_zone_b_subnet_blocks_private = format_ips(&self.options.eks_zone_b_subnet_blocks); - let mut eks_zone_c_subnet_blocks_private = format_ips(&self.options.eks_zone_c_subnet_blocks); - - match self.options.vpc_qovery_network_mode { - VpcQoveryNetworkMode::WithNatGateways => { - let max_subnet_zone_a = - self.check_odd_subnets(event_details.clone(), "a", &eks_zone_a_subnet_blocks_private)?; - let max_subnet_zone_b = - self.check_odd_subnets(event_details.clone(), "b", &eks_zone_b_subnet_blocks_private)?; - let max_subnet_zone_c = - self.check_odd_subnets(event_details.clone(), "c", &eks_zone_c_subnet_blocks_private)?; - - let eks_zone_a_subnet_blocks_public: Vec = - eks_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); - let eks_zone_b_subnet_blocks_public: Vec = - eks_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); - let eks_zone_c_subnet_blocks_public: Vec = - 
eks_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); - - context.insert("eks_zone_a_subnet_blocks_public", &eks_zone_a_subnet_blocks_public); - context.insert("eks_zone_b_subnet_blocks_public", &eks_zone_b_subnet_blocks_public); - context.insert("eks_zone_c_subnet_blocks_public", &eks_zone_c_subnet_blocks_public); - } - VpcQoveryNetworkMode::WithoutNatGateways => {} - }; - context.insert("vpc_qovery_network_mode", &self.options.vpc_qovery_network_mode.to_string()); - - let rds_zone_a_subnet_blocks = format_ips(&self.options.rds_zone_a_subnet_blocks); - let rds_zone_b_subnet_blocks = format_ips(&self.options.rds_zone_b_subnet_blocks); - let rds_zone_c_subnet_blocks = format_ips(&self.options.rds_zone_c_subnet_blocks); - - let documentdb_zone_a_subnet_blocks = format_ips(&self.options.documentdb_zone_a_subnet_blocks); - let documentdb_zone_b_subnet_blocks = format_ips(&self.options.documentdb_zone_b_subnet_blocks); - let documentdb_zone_c_subnet_blocks = format_ips(&self.options.documentdb_zone_c_subnet_blocks); - - let elasticache_zone_a_subnet_blocks = format_ips(&self.options.elasticache_zone_a_subnet_blocks); - let elasticache_zone_b_subnet_blocks = format_ips(&self.options.elasticache_zone_b_subnet_blocks); - let elasticache_zone_c_subnet_blocks = format_ips(&self.options.elasticache_zone_c_subnet_blocks); - - let elasticsearch_zone_a_subnet_blocks = format_ips(&self.options.elasticsearch_zone_a_subnet_blocks); - let elasticsearch_zone_b_subnet_blocks = format_ips(&self.options.elasticsearch_zone_b_subnet_blocks); - let elasticsearch_zone_c_subnet_blocks = format_ips(&self.options.elasticsearch_zone_c_subnet_blocks); - - let region_cluster_id = format!("{}-{}", self.region(), self.id()); - let vpc_cidr_block = self.options.vpc_cidr_block.clone(); - let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", self.id()); - let eks_cidr_subnet = self.options.eks_cidr_subnet.clone(); - - let eks_access_cidr_blocks = 
format_ips(&self.options.eks_access_cidr_blocks); - - let qovery_api_url = self.options.qovery_api_url.clone(); - let rds_cidr_subnet = self.options.rds_cidr_subnet.clone(); - let documentdb_cidr_subnet = self.options.documentdb_cidr_subnet.clone(); - let elasticache_cidr_subnet = self.options.elasticache_cidr_subnet.clone(); - let elasticsearch_cidr_subnet = self.options.elasticsearch_cidr_subnet.clone(); - - // Qovery - context.insert("organization_id", self.cloud_provider.organization_id()); - context.insert("qovery_api_url", &qovery_api_url); - - context.insert("engine_version_controller_token", &self.options.engine_version_controller_token); - context.insert("agent_version_controller_token", &self.options.agent_version_controller_token); - - context.insert("test_cluster", &self.context.is_test_cluster()); - if self.context.resource_expiration_in_seconds().is_some() { - context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) - } - context.insert("force_upgrade", &self.context.requires_forced_upgrade()); - - // Qovery features - context.insert("log_history_enabled", &self.context.is_feature_enabled(&Features::LogsHistory)); - context.insert( - "metrics_history_enabled", - &self.context.is_feature_enabled(&Features::MetricsHistory), - ); - - // DNS configuration - let managed_dns_list = vec![self.dns_provider.name()]; - let managed_dns_domains_helm_format = vec![self.dns_provider.domain().to_string()]; - let managed_dns_domains_root_helm_format = vec![self.dns_provider.domain().root_domain().to_string()]; - let managed_dns_domains_terraform_format = terraform_list_format(vec![self.dns_provider.domain().to_string()]); - let managed_dns_domains_root_terraform_format = - terraform_list_format(vec![self.dns_provider.domain().root_domain().to_string()]); - let managed_dns_resolvers_terraform_format = self.managed_dns_resolvers_terraform_format(); - - context.insert("managed_dns", &managed_dns_list); - 
context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); - context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); - context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); - context.insert( - "managed_dns_domains_root_terraform_format", - &managed_dns_domains_root_terraform_format, - ); - context.insert( - "managed_dns_resolvers_terraform_format", - &managed_dns_resolvers_terraform_format, - ); - - match self.dns_provider.kind() { - dns_provider::Kind::Cloudflare => { - context.insert("external_dns_provider", self.dns_provider.provider_name()); - context.insert("cloudflare_api_token", self.dns_provider.token()); - context.insert("cloudflare_email", self.dns_provider.account()); - } - }; - - context.insert("dns_email_report", &self.options.tls_email_report); - - // TLS - context.insert("acme_server_url", &self.lets_encrypt_url()); - - // Vault - context.insert("vault_auth_method", "none"); - - if env::var_os("VAULT_ADDR").is_some() { - // select the correct used method - match env::var_os("VAULT_ROLE_ID") { - Some(role_id) => { - context.insert("vault_auth_method", "app_role"); - context.insert("vault_role_id", role_id.to_str().unwrap()); - - match env::var_os("VAULT_SECRET_ID") { - Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), - None => self.logger().log(EngineEvent::Error( - EngineError::new_missing_required_env_variable( - event_details, - "VAULT_SECRET_ID".to_string(), - ), - None, - )), - } - } - None => { - if env::var_os("VAULT_TOKEN").is_some() { - context.insert("vault_auth_method", "token") - } - } - } - }; - - // Other Kubernetes - context.insert("kubernetes_cluster_name", &self.cluster_name()); - context.insert("enable_cluster_autoscaler", &true); - - // AWS - context.insert("aws_access_key", &self.cloud_provider.access_key_id()); - context.insert("aws_secret_key", &self.cloud_provider.secret_access_key()); - - 
// AWS S3 tfstate storage - context.insert( - "aws_access_key_tfstates_account", - self.cloud_provider() - .terraform_state_credentials() - .access_key_id - .as_str(), - ); - - context.insert( - "aws_secret_key_tfstates_account", - self.cloud_provider() - .terraform_state_credentials() - .secret_access_key - .as_str(), - ); - context.insert( - "aws_region_tfstates_account", - self.cloud_provider().terraform_state_credentials().region.as_str(), - ); - - context.insert("aws_region", &self.region()); - context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); - context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); - context.insert("vpc_cidr_block", &vpc_cidr_block); - context.insert("vpc_custom_routing_table", &self.options.vpc_custom_routing_table); - context.insert("s3_kubeconfig_bucket", &self.kubeconfig_bucket_name()); - - // AWS - EKS - context.insert("aws_availability_zones", &aws_zones); - context.insert("eks_cidr_subnet", &eks_cidr_subnet); - context.insert("kubernetes_cluster_name", &self.name()); - context.insert("kubernetes_cluster_id", self.id()); - context.insert("kubernetes_full_cluster_id", &self.long_id); - context.insert("eks_region_cluster_id", region_cluster_id.as_str()); - context.insert("eks_worker_nodes", &self.nodes_groups); - context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); - context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private); - context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); - context.insert("eks_masters_version", &self.version()); - context.insert("eks_workers_version", &self.version()); - context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); - context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); - - // AWS - RDS - context.insert("rds_cidr_subnet", &rds_cidr_subnet); - context.insert("rds_zone_a_subnet_blocks", &rds_zone_a_subnet_blocks); - 
context.insert("rds_zone_b_subnet_blocks", &rds_zone_b_subnet_blocks); - context.insert("rds_zone_c_subnet_blocks", &rds_zone_c_subnet_blocks); - - // AWS - DocumentDB - context.insert("documentdb_cidr_subnet", &documentdb_cidr_subnet); - context.insert("documentdb_zone_a_subnet_blocks", &documentdb_zone_a_subnet_blocks); - context.insert("documentdb_zone_b_subnet_blocks", &documentdb_zone_b_subnet_blocks); - context.insert("documentdb_zone_c_subnet_blocks", &documentdb_zone_c_subnet_blocks); - - // AWS - Elasticache - context.insert("elasticache_cidr_subnet", &elasticache_cidr_subnet); - context.insert("elasticache_zone_a_subnet_blocks", &elasticache_zone_a_subnet_blocks); - context.insert("elasticache_zone_b_subnet_blocks", &elasticache_zone_b_subnet_blocks); - context.insert("elasticache_zone_c_subnet_blocks", &elasticache_zone_c_subnet_blocks); - - // AWS - Elasticsearch - context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); - context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); - context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); - context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); - - // grafana credentials - context.insert("grafana_admin_user", self.options.grafana_admin_user.as_str()); - context.insert("grafana_admin_password", self.options.grafana_admin_password.as_str()); - - // qovery - context.insert("qovery_api_url", self.options.qovery_api_url.as_str()); - context.insert("qovery_nats_url", self.options.qovery_nats_url.as_str()); - context.insert("qovery_nats_user", self.options.qovery_nats_user.as_str()); - context.insert("qovery_nats_password", self.options.qovery_nats_password.as_str()); - context.insert("qovery_ssh_key", self.options.qovery_ssh_key.as_str()); - context.insert("discord_api_key", self.options.discord_api_key.as_str()); - - Ok(context) - } - - fn create(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), - )); - self.send_to_customer( - format!("Preparing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - // upgrade cluster instead if required - match self.get_kubeconfig_file() { - Ok((path, _)) => match is_kubernetes_upgrade_required( - path, - &self.version, - self.cloud_provider.credentials_environment_variables(), - event_details.clone(), - self.logger(), - ) { - Ok(x) => { - if x.required_upgrade_on.is_some() { - return self.upgrade_with_status(x); - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), - )) - } - Err(e) => { - self.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( - "Error detected, upgrade won't occurs, but standard deployment.".to_string(), - )))); - } - }, - Err(_) => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) - - }; - - // create AWS IAM roles - let already_created_roles = get_default_roles_to_create(); - for role in already_created_roles { - match role.create_service_linked_role( - self.cloud_provider.access_key_id().as_str(), - self.cloud_provider.secret_access_key().as_str(), - ) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Role {} is already present, no need to create", - role.role_name - )), - )), - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), 
role.role_name, e), - None, - )), - } - } - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let context = self.tera_context()?; - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deploying EKS cluster.".to_string()), - )); - self.send_to_customer( - format!("Deploying EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return 
Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - - // terraform deployment dedicated to cloud resources - if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - - // kubernetes helm deployments on the cluster - // todo: instead of downloading kubeconfig file, use the one that has just been generated by terraform - let kubeconfig_path = &self.get_kubeconfig_file_path()?; - let kubeconfig_path = Path::new(kubeconfig_path); - - let credentials_environment_variables: Vec<(String, String)> = self - .cloud_provider - .credentials_environment_variables() - .into_iter() - .map(|x| (x.0.to_string(), x.1.to_string())) - .collect(); - - let charts_prerequisites = ChartsConfigPrerequisites { - organization_id: self.cloud_provider.organization_id().to_string(), - organization_long_id: self.cloud_provider.organization_long_id(), - infra_options: self.options.clone(), - cluster_id: self.id.clone(), - cluster_long_id: self.long_id, - region: self.region(), - cluster_name: self.cluster_name(), - cloud_provider: "aws".to_string(), - test_cluster: self.context.is_test_cluster(), - aws_access_key_id: self.cloud_provider.access_key_id(), - aws_secret_access_key: self.cloud_provider.secret_access_key(), - vpc_qovery_network_mode: self.options.vpc_qovery_network_mode.clone(), - qovery_engine_location: self.get_engine_location(), - ff_log_history_enabled: self.context.is_feature_enabled(&Features::LogsHistory), - ff_metrics_history_enabled: self.context.is_feature_enabled(&Features::MetricsHistory), - managed_dns_name: self.dns_provider.domain().to_string(), - managed_dns_helm_format: 
self.dns_provider.domain().to_helm_format_string(), - managed_dns_resolvers_terraform_format: self.managed_dns_resolvers_terraform_format(), - external_dns_provider: self.dns_provider.provider_name().to_string(), - dns_email_report: self.options.tls_email_report.clone(), - acme_url: self.lets_encrypt_url(), - cloudflare_email: self.dns_provider.account().to_string(), - cloudflare_api_token: self.dns_provider.token().to_string(), - disable_pleco: self.context.disable_pleco(), - }; - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), - )); - let helm_charts_to_deploy = aws_helm_charts( - format!("{}/qovery-tf-config.json", &temp_dir).as_str(), - &charts_prerequisites, - Some(&temp_dir), - kubeconfig_path, - &credentials_environment_variables, - ) - .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; - - deploy_charts_levels( - kubeconfig_path, - &credentials_environment_variables, - helm_charts_to_deploy, - self.context.is_dry_run_deploy(), - ) - .map_err(|e| EngineError::new_helm_charts_deploy_error(event_details.clone(), e)) - } - - fn create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - let (kubeconfig_path, _) = self.get_kubeconfig_file()?; - let environment_variables: Vec<(&str, &str)> = self.cloud_provider.credentials_environment_variables(); - - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), - EventMessage::new_from_safe("EKS.create_error() called.".to_string()), - )); - - match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { - Ok(ok_line) => self - .logger() - .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), - Err(err) => self.logger().log(EngineEvent::Warning( - event_details, - EventMessage::new( - "Error trying to 
get kubernetes events".to_string(), - Some(err.message(ErrorMessageVerbosity::FullDetails)), - ), - )), - }; - - Ok(()) - } - - fn upgrade_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), - EventMessage::new_from_safe("EKS.upgrade_error() called.".to_string()), - )); - - Ok(()) - } - - fn downgrade(&self) -> Result<(), EngineError> { - Ok(()) - } - - fn downgrade_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), - EventMessage::new_from_safe("EKS.downgrade_error() called.".to_string()), - )); - - Ok(()) - } - - fn pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.send_to_customer( - format!("Preparing EKS {} cluster pause with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - - self.logger().log(EngineEvent::Info( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing EKS cluster pause.".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let mut context = self.tera_context()?; - - // pause: remove all worker nodes to reduce the bill but keep master to keep all the deployment config, certificates etc... 
- let worker_nodes: Vec = Vec::new(); - context.insert("eks_worker_nodes", &worker_nodes); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - // pause: only select terraform workers elements to pause to avoid applying on the whole config - // this to avoid failures because of helm deployments on removing workers nodes - let tf_workers_resources = match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let mut tf_workers_resources_name = Vec::new(); - for name in x { - if name.starts_with("aws_eks_node_group.") { - tf_workers_resources_name.push(name); - } - } - tf_workers_resources_name - } - Err(e) => { - let error = EngineError::new_terraform_state_does_not_exist(event_details, e); - self.logger().log(EngineEvent::Error(error.clone(), None)); - return Err(error); - } - }; - - if tf_workers_resources.is_empty() { - return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); - } - - let kubernetes_config_file_path = self.get_kubeconfig_file_path()?; - - // pause: wait 1h for the engine to have 0 running jobs before pausing 
and avoid getting unreleased lock (from helm or terraform for example) - if self.get_engine_location() == EngineLocation::ClientSide { - match self.context.is_feature_enabled(&Features::MetricsHistory) { - true => { - let metric_name = "taskmanager_nb_running_tasks"; - let wait_engine_job_finish = retry::retry(Fixed::from_millis(60000).take(60), || { - return match kubectl_exec_api_custom_metrics( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - "qovery", - None, - metric_name, - ) { - Ok(metrics) => { - let mut current_engine_jobs = 0; - - for metric in metrics.items { - match metric.value.parse::() { - Ok(job_count) if job_count > 0 => current_engine_jobs += 1, - Err(e) => { - return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics( - event_details.clone(), - CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); - } - _ => {} - } - } - - if current_engine_jobs == 0 { - OperationResult::Ok(()) - } else { - OperationResult::Retry(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), None)) - } - } - Err(e) => { - OperationResult::Retry( - EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), e)) - } - }; - }); - - match wait_engine_job_finish { - Ok(_) => { - self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); - } - Err(Operation { error, .. }) => { - return Err(error) - } - Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) - } - } - } - false => self.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), - } - } - - let mut terraform_args_string = vec!["apply".to_string(), "-auto-approve".to_string()]; - for x in tf_workers_resources { - terraform_args_string.push(format!("-target={}", x)); - } - let terraform_args = terraform_args_string.iter().map(|x| &**x).collect(); - - self.send_to_customer( - format!("Pausing EKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), - )); - - match terraform_exec(temp_dir.as_str(), terraform_args) { - Ok(_) => { - let message = format!("Kubernetes cluster {} successfully paused", self.name()); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); - Ok(()) - } - Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), - } - } - - fn pause_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("EKS.pause_error() called.".to_string()), - )); - - Ok(()) - } - - fn delete(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - let listeners_helper = ListenersHelper::new(&self.listeners); - let mut skip_kubernetes_step = false; - - self.send_to_customer( - format!("Preparing to delete EKS cluster {} with id {}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing to delete EKS cluster.".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files 
and copy them into temp dir - let context = self.tera_context()?; - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context, - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - bootstrap_charts_dir, - common_charts_temp_dir, - e, - )); - } - - let kubernetes_config_file_path = match self.get_kubeconfig_file_path() { - Ok(x) => x, - Err(e) => { - let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(safe_message.to_string(), Some(e.message(ErrorMessageVerbosity::FullDetails))), - )); - - skip_kubernetes_step = true; - "".to_string() - } - }; - - // should apply before destroy to be sure destroy will compute on all resources - // don't exit on failure, it can happen if we resume a destroy process - let message = format!( - "Ensuring everything is up to date before deleting cluster {}/{}", - self.name(), - self.id() - ); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Running 
Terraform apply before running a delete.".to_string()), - )); - if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { - // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy - self.logger().log(EngineEvent::Error( - EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), - None, - )); - }; - - if !skip_kubernetes_step { - // should make the diff between all namespaces and qovery managed namespaces - let message = format!( - "Deleting all non-Qovery deployed applications and dependencies for cluster {}/{}", - self.name(), - self.id() - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(message.to_string()), - )); - self.send_to_customer(&message, &listeners_helper); - - let all_namespaces = kubectl_exec_get_all_namespaces( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - ); - - match all_namespaces { - Ok(namespace_vec) => { - let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); - let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), - )); - - for namespace_to_delete in namespaces_to_delete.iter() { - match cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, - namespace_to_delete, - self.cloud_provider().credentials_environment_variables(), - ) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!( - "Namespace `{}` deleted successfully.", - namespace_to_delete - )), - )), - Err(e) => { - if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!( - 
"Can't delete the namespace `{}`", - namespace_to_delete - )), - )); - } - } - } - } - } - Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), - )); - } - } - - let message = format!( - "Deleting all Qovery deployed elements and associated dependencies for cluster {}/{}", - self.name(), - self.id() - ); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - // delete custom metrics api to avoid stale namespaces on deletion - let helm = Helm::new( - &kubernetes_config_file_path, - &self.cloud_provider.credentials_environment_variables(), - ) - .map_err(|e| to_engine_error(&event_details, e))?; - let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); - helm.uninstall(&chart, &[]) - .map_err(|e| to_engine_error(&event_details, e))?; - - // required to avoid namespace stuck on deletion - uninstall_cert_manager( - &kubernetes_config_file_path, - self.cloud_provider().credentials_environment_variables(), - event_details.clone(), - self.logger(), - )?; - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed helm charts".to_string()), - )); - - let qovery_namespaces = get_qovery_managed_namespaces(); - for qovery_namespace in qovery_namespaces.iter() { - let charts_to_delete = helm - .list_release(Some(qovery_namespace), &[]) - .map_err(|e| to_engine_error(&event_details, e))?; - - for chart in charts_to_delete { - let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - 
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - )), - Err(e) => { - let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } - } - } - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), - )); - - for qovery_namespace in qovery_namespaces.iter() { - let deletion = cmd::kubectl::kubectl_exec_delete_namespace( - &kubernetes_config_file_path, - qovery_namespace, - self.cloud_provider().credentials_environment_variables(), - ); - match deletion { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), - )), - Err(e) => { - if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), - )) - } - } - } - } - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), - )); - - match helm.list_release(None, &[]) { - Ok(helm_charts) => { - for chart in helm_charts { - let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); - match helm.uninstall(&chart_info, &[]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), - )), - Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } - } - } - } - Err(e) => { - let message_safe = "Unable to get helm list"; - 
self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - )) - } - } - }; - - let message = format!("Deleting Kubernetes cluster {}/{}", self.name(), self.id()); - self.send_to_customer(&message, &listeners_helper); - self.logger() - .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); - - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Running Terraform destroy".to_string()), - )); - - match retry::retry(Fibonacci::from_millis(60000).take(3), || { - match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { - Ok(_) => OperationResult::Ok(()), - Err(e) => OperationResult::Retry(e), - } - }) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes cluster {}/{} successfully deleted", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details, - EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), - )); - Ok(()) - } - Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - error, - )), - Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( - event_details, - CommandError::new("Error while trying to perform Terraform destroy".to_string(), Some(msg), None), - )), - } - } - - fn delete_error(&self) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Warning( - self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), - EventMessage::new_from_safe("EKS.delete_error() called.".to_string()), - )); - - Ok(()) - } - fn cloud_provider_name(&self) -> &str { "aws" } @@ -1320,7 +298,16 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.create()) + send_progress_on_long_task(self, Action::Create, || { + create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1334,7 +321,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.create_error()) + send_progress_on_long_task(self, Action::Create, || create_error(self)) } fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { @@ -1358,7 +345,7 @@ impl Kubernetes for EKS { let temp_dir = self.get_temp_dir(event_details.clone())?; // generate terraform files and copy them into temp dir - let mut context = self.tera_context()?; + let mut context = tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; // // Upgrade master nodes @@ -1579,7 +566,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.upgrade_error()) + send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) } #[named] @@ -1593,7 +580,7 @@ impl Kubernetes for EKS { event_details, self.logger(), 
); - send_progress_on_long_task(self, Action::Create, || self.downgrade()) + send_progress_on_long_task(self, Action::Create, || downgrade()) } #[named] @@ -1607,7 +594,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || self.downgrade_error()) + send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) } #[named] @@ -1621,7 +608,15 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Pause, || self.pause()) + send_progress_on_long_task(self, Action::Pause, || { + pause( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1635,7 +630,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Pause, || self.pause_error()) + send_progress_on_long_task(self, Action::Pause, || pause_error(self)) } #[named] @@ -1649,7 +644,15 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Delete, || self.delete()) + send_progress_on_long_task(self, Action::Delete, || { + delete( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) } #[named] @@ -1663,7 +666,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Delete, || self.delete_error()) + send_progress_on_long_task(self, Action::Delete, || delete_error(self)) } #[named] @@ -1795,7 +798,7 @@ impl EC2 { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); - let mut aws_zones = aws_zones(zones, ®ion, &event_details)?; + let aws_zones = aws_zones(zones, ®ion, &event_details)?; let s3 = s3(&context, ®ion, cloud_provider.as_ref()); // copy listeners from CloudProvider @@ -1817,127 +820,309 @@ impl EC2 { listeners, }) } + + fn 
cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } } impl Kubernetes for EC2 { fn context(&self) -> &Context { - todo!() + &self.context } fn kind(&self) -> Kind { - todo!() + Kind::Ec2 } fn id(&self) -> &str { - todo!() + self.id.as_str() } fn name(&self) -> &str { - todo!() + self.name.as_str() } fn version(&self) -> &str { - todo!() + self.version.as_str() } fn region(&self) -> String { - todo!() + self.region.to_aws_format() } fn zone(&self) -> &str { - todo!() + "" } fn aws_zones(&self) -> Option> { - todo!() + Some(self.zones.clone()) } fn cloud_provider(&self) -> &dyn CloudProvider { - todo!() + (*self.cloud_provider).borrow() } fn dns_provider(&self) -> &dyn DnsProvider { - todo!() + (*self.dns_provider).borrow() } fn logger(&self) -> &dyn Logger { - todo!() + self.logger.borrow() } fn config_file_store(&self) -> &dyn ObjectStorage { - todo!() + &self.s3 } fn is_valid(&self) -> Result<(), EngineError> { - todo!() + Ok(()) } + #[named] fn on_create(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &vec![], + &self.options, + ) + }) } + #[named] fn on_create_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || create_error(self)) } - fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - 
todo!() + fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + // TODO + Ok(()) } + #[named] fn on_upgrade(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) } + #[named] fn on_upgrade_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) } + #[named] fn on_downgrade(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || downgrade()) } + #[named] fn on_downgrade_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) } + #[named] fn on_pause(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + 
self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || { + pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) } + #[named] fn on_pause_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || pause_error(self)) } + #[named] fn on_delete(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) } + #[named] fn on_delete_error(&self) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || delete_error(self)) } + #[named] fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::deploy_environment(self, environment, event_details, self.logger()) } + #[named] fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) } + #[named] fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::pause_environment(self, environment, event_details, self.logger()) } - fn pause_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } + #[named] fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + kubernetes::delete_environment(self, environment, event_details, self.logger()) } - fn delete_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - todo!() + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + 
function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) } } @@ -2005,3 +1190,1081 @@ fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box Result { + if subnet_block.len() % 2 == 1 { + return Err(EngineError::new_subnets_count_is_not_even( + event_details, + zone_name.to_string(), + subnet_block.len(), + )); + } + + Ok((subnet_block.len() / 2) as usize) +} + +fn lets_encrypt_url(context: &Context) -> String { + match context.is_test_cluster() { + true => "https://acme-staging-v02.api.letsencrypt.org/directory", + false => "https://acme-v02.api.letsencrypt.org/directory", + } + .to_string() +} + +fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> String { + let managed_dns_resolvers = dns_provider + .resolvers() + .iter() + .map(|x| format!("{}", x.clone())) + .collect::>(); + + terraform_list_format(managed_dns_resolvers) +} + +fn tera_context( + kubernetes: &dyn Kubernetes, + zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); + let mut context = TeraContext::new(); + + let format_ips = + |ips: &Vec| -> Vec { ips.iter().map(|ip| format!("\"{}\"", ip)).collect::>() }; + + let aws_zones = zones + .iter() + .map(|zone| zone.to_terraform_format_string()) + .collect::>(); + + let mut eks_zone_a_subnet_blocks_private = format_ips(&options.eks_zone_a_subnet_blocks); + let mut eks_zone_b_subnet_blocks_private = format_ips(&options.eks_zone_b_subnet_blocks); + let mut eks_zone_c_subnet_blocks_private = format_ips(&options.eks_zone_c_subnet_blocks); + + match options.vpc_qovery_network_mode { + VpcQoveryNetworkMode::WithNatGateways => { + let max_subnet_zone_a = check_odd_subnets(event_details.clone(), "a", &eks_zone_a_subnet_blocks_private)?; + let max_subnet_zone_b = check_odd_subnets(event_details.clone(), "b", &eks_zone_b_subnet_blocks_private)?; + let max_subnet_zone_c = 
check_odd_subnets(event_details.clone(), "c", &eks_zone_c_subnet_blocks_private)?; + + let eks_zone_a_subnet_blocks_public: Vec = + eks_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); + let eks_zone_b_subnet_blocks_public: Vec = + eks_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); + let eks_zone_c_subnet_blocks_public: Vec = + eks_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); + + context.insert("eks_zone_a_subnet_blocks_public", &eks_zone_a_subnet_blocks_public); + context.insert("eks_zone_b_subnet_blocks_public", &eks_zone_b_subnet_blocks_public); + context.insert("eks_zone_c_subnet_blocks_public", &eks_zone_c_subnet_blocks_public); + } + VpcQoveryNetworkMode::WithoutNatGateways => {} + }; + + context.insert("vpc_qovery_network_mode", &options.vpc_qovery_network_mode.to_string()); + + let rds_zone_a_subnet_blocks = format_ips(&options.rds_zone_a_subnet_blocks); + let rds_zone_b_subnet_blocks = format_ips(&options.rds_zone_b_subnet_blocks); + let rds_zone_c_subnet_blocks = format_ips(&options.rds_zone_c_subnet_blocks); + + let documentdb_zone_a_subnet_blocks = format_ips(&options.documentdb_zone_a_subnet_blocks); + let documentdb_zone_b_subnet_blocks = format_ips(&options.documentdb_zone_b_subnet_blocks); + let documentdb_zone_c_subnet_blocks = format_ips(&options.documentdb_zone_c_subnet_blocks); + + let elasticache_zone_a_subnet_blocks = format_ips(&options.elasticache_zone_a_subnet_blocks); + let elasticache_zone_b_subnet_blocks = format_ips(&options.elasticache_zone_b_subnet_blocks); + let elasticache_zone_c_subnet_blocks = format_ips(&options.elasticache_zone_c_subnet_blocks); + + let elasticsearch_zone_a_subnet_blocks = format_ips(&options.elasticsearch_zone_a_subnet_blocks); + let elasticsearch_zone_b_subnet_blocks = format_ips(&options.elasticsearch_zone_b_subnet_blocks); + let elasticsearch_zone_c_subnet_blocks = format_ips(&options.elasticsearch_zone_c_subnet_blocks); + + let region_cluster_id = 
format!("{}-{}", kubernetes.region(), kubernetes.id()); + let vpc_cidr_block = options.vpc_cidr_block.clone(); + let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", kubernetes.id()); + let eks_cidr_subnet = options.eks_cidr_subnet.clone(); + + let eks_access_cidr_blocks = format_ips(&options.eks_access_cidr_blocks); + + let qovery_api_url = options.qovery_api_url.clone(); + let rds_cidr_subnet = options.rds_cidr_subnet.clone(); + let documentdb_cidr_subnet = options.documentdb_cidr_subnet.clone(); + let elasticache_cidr_subnet = options.elasticache_cidr_subnet.clone(); + let elasticsearch_cidr_subnet = options.elasticsearch_cidr_subnet.clone(); + + // Qovery + context.insert("organization_id", kubernetes.cloud_provider().organization_id()); + context.insert("qovery_api_url", &qovery_api_url); + + context.insert("engine_version_controller_token", &options.engine_version_controller_token); + context.insert("agent_version_controller_token", &options.agent_version_controller_token); + + context.insert("test_cluster", &kubernetes.context().is_test_cluster()); + + if let Some(resource_expiration_in_seconds) = kubernetes.context().resource_expiration_in_seconds() { + context.insert("resource_expiration_in_seconds", &resource_expiration_in_seconds); + } + + context.insert("force_upgrade", &kubernetes.context().requires_forced_upgrade()); + + // Qovery features + context.insert( + "log_history_enabled", + &kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ); + context.insert( + "metrics_history_enabled", + &kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + ); + + // DNS configuration + let managed_dns_list = vec![kubernetes.dns_provider().name()]; + let managed_dns_domains_helm_format = vec![kubernetes.dns_provider().domain().to_string()]; + let managed_dns_domains_root_helm_format = vec![kubernetes.dns_provider().domain().root_domain().to_string()]; + let managed_dns_domains_terraform_format = + 
terraform_list_format(vec![kubernetes.dns_provider().domain().to_string()]); + let managed_dns_domains_root_terraform_format = + terraform_list_format(vec![kubernetes.dns_provider().domain().root_domain().to_string()]); + let managed_dns_resolvers_terraform_format = managed_dns_resolvers_terraform_format(kubernetes.dns_provider()); + + context.insert("managed_dns", &managed_dns_list); + context.insert("managed_dns_domains_helm_format", &managed_dns_domains_helm_format); + context.insert("managed_dns_domains_root_helm_format", &managed_dns_domains_root_helm_format); + context.insert("managed_dns_domains_terraform_format", &managed_dns_domains_terraform_format); + context.insert( + "managed_dns_domains_root_terraform_format", + &managed_dns_domains_root_terraform_format, + ); + context.insert( + "managed_dns_resolvers_terraform_format", + &managed_dns_resolvers_terraform_format, + ); + + match kubernetes.dns_provider().kind() { + dns_provider::Kind::Cloudflare => { + context.insert("external_dns_provider", kubernetes.dns_provider().provider_name()); + context.insert("cloudflare_api_token", kubernetes.dns_provider().token()); + context.insert("cloudflare_email", kubernetes.dns_provider().account()); + } + }; + + context.insert("dns_email_report", &options.tls_email_report); + + // TLS + context.insert("acme_server_url", &lets_encrypt_url(kubernetes.context())); + + // Vault + context.insert("vault_auth_method", "none"); + + if env::var_os("VAULT_ADDR").is_some() { + // select the correct used method + match env::var_os("VAULT_ROLE_ID") { + Some(role_id) => { + context.insert("vault_auth_method", "app_role"); + context.insert("vault_role_id", role_id.to_str().unwrap()); + + match env::var_os("VAULT_SECRET_ID") { + Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), + None => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_missing_required_env_variable(event_details, "VAULT_SECRET_ID".to_string()), + None, + )), + } + } + 
None => { + if env::var_os("VAULT_TOKEN").is_some() { + context.insert("vault_auth_method", "token") + } + } + } + }; + + // Other Kubernetes + context.insert("kubernetes_cluster_name", &kubernetes.cluster_name()); + context.insert("enable_cluster_autoscaler", &true); + + // AWS + context.insert("aws_access_key", &kubernetes.cloud_provider().access_key_id()); + context.insert("aws_secret_key", &kubernetes.cloud_provider().secret_access_key()); + + // AWS S3 tfstate storage + context.insert( + "aws_access_key_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .access_key_id + .as_str(), + ); + + context.insert( + "aws_secret_key_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .secret_access_key + .as_str(), + ); + context.insert( + "aws_region_tfstates_account", + kubernetes + .cloud_provider() + .terraform_state_credentials() + .region + .as_str(), + ); + + context.insert("aws_region", &kubernetes.region()); + context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); + context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); + context.insert("vpc_cidr_block", &vpc_cidr_block); + context.insert("vpc_custom_routing_table", &options.vpc_custom_routing_table); + context.insert("s3_kubeconfig_bucket", &format!("qovery-kubeconfigs-{}", kubernetes.id())); + + // AWS - EKS + context.insert("aws_availability_zones", &aws_zones); + context.insert("eks_cidr_subnet", &eks_cidr_subnet); + context.insert("kubernetes_cluster_name", kubernetes.name()); + context.insert("kubernetes_cluster_id", kubernetes.id()); + context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); + context.insert("eks_region_cluster_id", region_cluster_id.as_str()); + context.insert("eks_worker_nodes", &node_groups); // FIXME + context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); + context.insert("eks_zone_b_subnet_blocks_private", 
&eks_zone_b_subnet_blocks_private); + context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); + context.insert("eks_masters_version", &kubernetes.version()); + context.insert("eks_workers_version", &kubernetes.version()); + context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); + context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); + + // AWS - RDS + context.insert("rds_cidr_subnet", &rds_cidr_subnet); + context.insert("rds_zone_a_subnet_blocks", &rds_zone_a_subnet_blocks); + context.insert("rds_zone_b_subnet_blocks", &rds_zone_b_subnet_blocks); + context.insert("rds_zone_c_subnet_blocks", &rds_zone_c_subnet_blocks); + + // AWS - DocumentDB + context.insert("documentdb_cidr_subnet", &documentdb_cidr_subnet); + context.insert("documentdb_zone_a_subnet_blocks", &documentdb_zone_a_subnet_blocks); + context.insert("documentdb_zone_b_subnet_blocks", &documentdb_zone_b_subnet_blocks); + context.insert("documentdb_zone_c_subnet_blocks", &documentdb_zone_c_subnet_blocks); + + // AWS - Elasticache + context.insert("elasticache_cidr_subnet", &elasticache_cidr_subnet); + context.insert("elasticache_zone_a_subnet_blocks", &elasticache_zone_a_subnet_blocks); + context.insert("elasticache_zone_b_subnet_blocks", &elasticache_zone_b_subnet_blocks); + context.insert("elasticache_zone_c_subnet_blocks", &elasticache_zone_c_subnet_blocks); + + // AWS - Elasticsearch + context.insert("elasticsearch_cidr_subnet", &elasticsearch_cidr_subnet); + context.insert("elasticsearch_zone_a_subnet_blocks", &elasticsearch_zone_a_subnet_blocks); + context.insert("elasticsearch_zone_b_subnet_blocks", &elasticsearch_zone_b_subnet_blocks); + context.insert("elasticsearch_zone_c_subnet_blocks", &elasticsearch_zone_c_subnet_blocks); + + // grafana credentials + context.insert("grafana_admin_user", options.grafana_admin_user.as_str()); + context.insert("grafana_admin_password", options.grafana_admin_password.as_str()); + + // qovery + 
context.insert("qovery_api_url", options.qovery_api_url.as_str()); + context.insert("qovery_nats_url", options.qovery_nats_url.as_str()); + context.insert("qovery_nats_user", options.qovery_nats_user.as_str()); + context.insert("qovery_nats_password", options.qovery_nats_password.as_str()); + context.insert("qovery_ssh_key", options.qovery_ssh_key.as_str()); + context.insert("discord_api_key", options.discord_api_key.as_str()); + + Ok(context) +} + +fn create( + kubernetes: &dyn Kubernetes, + kubernetes_long_id: uuid::Uuid, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing EKS cluster deployment.".to_string()), + )); + + kubernetes.send_to_customer( + format!( + "Preparing {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + // upgrade cluster instead if required + match kubernetes.get_kubeconfig_file() { + Ok((path, _)) => match is_kubernetes_upgrade_required( + path, + kubernetes.version(), + kubernetes.cloud_provider().credentials_environment_variables(), + event_details.clone(), + kubernetes.logger(), + ) { + Ok(x) => { + if x.required_upgrade_on.is_some() { + return kubernetes.upgrade_with_status(x); + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Kubernetes cluster upgrade not required".to_string()), + )) + } + Err(e) => { + kubernetes.logger().log(EngineEvent::Error(e, Some(EventMessage::new_from_safe( + "Error detected, upgrade won't occurs, but standard deployment.".to_string(), + )))); + } + }, + Err(_) => 
kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) + + }; + + // create AWS IAM roles + let already_created_roles = get_default_roles_to_create(); + for role in already_created_roles { + match role.create_service_linked_role( + kubernetes.cloud_provider().access_key_id().as_str(), + kubernetes.cloud_provider().secret_access_key().as_str(), + ) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Role {} is already present, no need to create", role.role_name)), + )), + Err(e) => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_cannot_get_or_create_iam_role(event_details.clone(), role.role_name, e), + None, + )), + } + } + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
+ // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Deploying {} cluster.", kubernetes.kind())), + )); + + kubernetes.send_to_customer( + format!( + "Deploying {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + // temporary: remove helm/kube management from terraform + match terraform_init_validate_state_list(temp_dir.as_str()) { + Ok(x) => { + let items_type = vec!["helm_release", "kubernetes_namespace"]; + for item in items_type { + for entry in x.clone() { + if entry.starts_with(item) { + match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + )), + Err(e) => { + return Err(EngineError::new_terraform_cannot_remove_entry_out( + event_details, + entry.to_string(), + e, + )) + } + } + }; + } + } + } + Err(e) => kubernetes.logger().log(EngineEvent::Error( + EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), + None, + )), + }; + + // terraform deployment dedicated to cloud resources + if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), kubernetes.context().is_dry_run_deploy()) { + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + + // 
kubernetes helm deployments on the cluster + // todo: instead of downloading kubeconfig file, use the one that has just been generated by terraform + let kubeconfig_path = kubernetes.get_kubeconfig_file_path()?; + let kubeconfig_path = Path::new(&kubeconfig_path); + + let credentials_environment_variables: Vec<(String, String)> = kubernetes + .cloud_provider() + .credentials_environment_variables() + .into_iter() + .map(|x| (x.0.to_string(), x.1.to_string())) + .collect(); + + let charts_prerequisites = ChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(kubernetes.dns_provider()), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: 
kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), + )); + + let helm_charts_to_deploy = aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; + + deploy_charts_levels( + kubeconfig_path, + &credentials_environment_variables, + helm_charts_to_deploy, + kubernetes.context().is_dry_run_deploy(), + ) + .map_err(|e| EngineError::new_helm_charts_deploy_error(event_details.clone(), e)) +} + +fn create_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + let (kubeconfig_path, _) = kubernetes.get_kubeconfig_file()?; + let environment_variables = kubernetes.cloud_provider().credentials_environment_variables(); + + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)), + EventMessage::new_from_safe(format!("{}.create_error() called.", kubernetes.kind())), + )); + + match kubectl_exec_get_events(kubeconfig_path, None, environment_variables) { + Ok(ok_line) => kubernetes + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new(ok_line, None))), + Err(err) => kubernetes.logger().log(EngineEvent::Warning( + event_details, + EventMessage::new( + "Error trying to get kubernetes events".to_string(), + Some(err.message(ErrorMessageVerbosity::FullDetails)), + ), + )), + }; + + Ok(()) +} + +fn upgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + 
kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)), + EventMessage::new_from_safe(format!("{}.upgrade_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn downgrade() -> Result<(), EngineError> { + Ok(()) +} + +fn downgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)), + EventMessage::new_from_safe(format!("{}.downgrade_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn pause( + kubernetes: &dyn Kubernetes, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + + kubernetes.send_to_customer( + format!( + "Preparing {} {} cluster pause with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + kubernetes.logger().log(EngineEvent::Info( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe(format!("Preparing {} cluster pause.", kubernetes.kind())), + )); + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let mut context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + // pause: remove all worker nodes to reduce the bill but keep master to keep all the deployment config, certificates etc... 
+ let worker_nodes: Vec = Vec::new(); + context.insert("eks_worker_nodes", &worker_nodes); + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + // pause: only select terraform workers elements to pause to avoid applying on the whole config + // this to avoid failures because of helm deployments on removing workers nodes + let tf_workers_resources = match terraform_init_validate_state_list(temp_dir.as_str()) { + Ok(x) => { + let mut tf_workers_resources_name = Vec::new(); + for name in x { + if name.starts_with("aws_eks_node_group.") { + tf_workers_resources_name.push(name); + } + } + tf_workers_resources_name + } + Err(e) => { + let error = EngineError::new_terraform_state_does_not_exist(event_details, e); + kubernetes.logger().log(EngineEvent::Error(error.clone(), None)); + return Err(error); + } + }; + + if tf_workers_resources.is_empty() { + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, None)); + } + + let kubernetes_config_file_path = kubernetes.get_kubeconfig_file_path()?; + + // pause: wait 1h for the engine to have 0 running jobs before pausing and 
avoid getting unreleased lock (from helm or terraform for example) + if options.qovery_engine_location == EngineLocation::ClientSide { + match kubernetes.context().is_feature_enabled(&Features::MetricsHistory) { + true => { + let metric_name = "taskmanager_nb_running_tasks"; + let wait_engine_job_finish = retry::retry(Fixed::from_millis(60000).take(60), || { + return match kubectl_exec_api_custom_metrics( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + "qovery", + None, + metric_name, + ) { + Ok(metrics) => { + let mut current_engine_jobs = 0; + + for metric in metrics.items { + match metric.value.parse::() { + Ok(job_count) if job_count > 0 => current_engine_jobs += 1, + Err(e) => { + return OperationResult::Retry(EngineError::new_cannot_get_k8s_api_custom_metrics( + event_details.clone(), + CommandError::new("Error while looking at the API metric value".to_string(), Some(e.to_string()), None))); + } + _ => {} + } + } + + if current_engine_jobs == 0 { + OperationResult::Ok(()) + } else { + OperationResult::Retry(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details.clone(), None)) + } + } + Err(e) => { + OperationResult::Retry( + EngineError::new_cannot_get_k8s_api_custom_metrics(event_details.clone(), e)) + } + }; + }); + + match wait_engine_job_finish { + Ok(_) => { + kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); + } + Err(Operation { error, .. }) => { + return Err(error) + } + Err(retry::Error::Internal(msg)) => { + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) + } + } + } + false => kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Engines are running Client side, but metric history flag is disabled. 
You will encounter issues during cluster lifecycles if you do not enable metric history".to_string()))), + } + } + + let mut terraform_args_string = vec!["apply".to_string(), "-auto-approve".to_string()]; + for x in tf_workers_resources { + terraform_args_string.push(format!("-target={}", x)); + } + let terraform_args = terraform_args_string.iter().map(|x| &**x).collect(); + + kubernetes.send_to_customer( + format!( + "Pausing {} {} cluster deployment with id {}", + kubernetes.kind(), + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), + )); + + match terraform_exec(temp_dir.as_str(), terraform_args) { + Ok(_) => { + let message = format!("Kubernetes cluster {} successfully paused", kubernetes.name()); + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); + Ok(()) + } + Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), + } +} + +fn pause_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), + EventMessage::new_from_safe(format!("{}.pause_error() called.", kubernetes.kind())), + )); + + Ok(()) +} + +fn delete( + kubernetes: &dyn Kubernetes, + template_directory: &str, + aws_zones: &Vec, + node_groups: &Vec, + options: &Options, +) -> Result<(), EngineError> { + let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + let listeners_helper = ListenersHelper::new(kubernetes.listeners()); + let mut skip_kubernetes_step = false; + + kubernetes.send_to_customer( + format!( + "Preparing to delete {} cluster {} with id {}", + kubernetes.kind(), + kubernetes.name(), 
+ kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Preparing to delete {} cluster.", kubernetes.kind())), + )); + + let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let context = tera_context(kubernetes, aws_zones, node_groups, options)?; + + if let Err(e) = + crate::template::generate_and_copy_all_files_into_dir(template_directory, temp_dir.as_str(), context) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + bootstrap_charts_dir, + common_charts_temp_dir, + e, + )); + } + + let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() { + Ok(x) => x, + Err(e) => { + let safe_message = "Skipping Kubernetes uninstall because it can't be reached."; + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(safe_message.to_string(), Some(e.message(ErrorMessageVerbosity::FullDetails))), + )); + + skip_kubernetes_step = true; + "".to_string() + } + }; + + // should apply before destroy to be sure destroy will compute on all resources + // don't exit on failure, it can happen if we resume a destroy process + let message = format!( + 
"Ensuring everything is up to date before deleting cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform apply before running a delete.".to_string()), + )); + + if let Err(e) = cmd::terraform::terraform_init_validate_plan_apply(temp_dir.as_str(), false) { + // An issue occurred during the apply before destroy of Terraform, it may be expected if you're resuming a destroy + kubernetes.logger().log(EngineEvent::Error( + EngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e), + None, + )); + }; + + if !skip_kubernetes_step { + // should make the diff between all namespaces and qovery managed namespaces + let message = format!( + "Deleting all non-Qovery deployed applications and dependencies for cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(message.to_string()), + )); + + kubernetes.send_to_customer(&message, &listeners_helper); + + let all_namespaces = kubectl_exec_get_all_namespaces( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + ); + + match all_namespaces { + Ok(namespace_vec) => { + let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect(); + let namespaces_to_delete = get_firsts_namespaces_to_delete(namespaces_as_str); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting non Qovery namespaces".to_string()), + )); + + for namespace_to_delete in namespaces_to_delete.iter() { + match cmd::kubectl::kubectl_exec_delete_namespace( + &kubernetes_config_file_path, + namespace_to_delete, + 
kubernetes.cloud_provider().credentials_environment_variables(), + ) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Namespace `{}` deleted successfully.", + namespace_to_delete + )), + )), + Err(e) => { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!( + "Can't delete the namespace `{}`", + namespace_to_delete + )), + )); + } + } + } + } + } + Err(e) => { + let message_safe = format!( + "Error while getting all namespaces for Kubernetes cluster {}", + kubernetes.name_with_id(), + ); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), + )); + } + } + + let message = format!( + "Deleting all Qovery deployed elements and associated dependencies for cluster {}/{}", + kubernetes.name(), + kubernetes.id() + ); + + kubernetes.send_to_customer(&message, &listeners_helper); + + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + // delete custom metrics api to avoid stale namespaces on deletion + let helm = Helm::new( + &kubernetes_config_file_path, + &kubernetes.cloud_provider().credentials_environment_variables(), + ) + .map_err(|e| to_engine_error(&event_details, e))?; + let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system"); + helm.uninstall(&chart, &[]) + .map_err(|e| to_engine_error(&event_details, e))?; + + // required to avoid namespace stuck on deletion + uninstall_cert_manager( + &kubernetes_config_file_path, + kubernetes.cloud_provider().credentials_environment_variables(), + event_details.clone(), + kubernetes.logger(), + )?; + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed helm 
charts".to_string()), + )); + + let qovery_namespaces = get_qovery_managed_namespaces(); + for qovery_namespace in qovery_namespaces.iter() { + let charts_to_delete = helm + .list_release(Some(qovery_namespace), &[]) + .map_err(|e| to_engine_error(&event_details, e))?; + + for chart in charts_to_delete { + let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace); + match helm.uninstall(&chart_info, &[]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), + Err(e) => { + let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) + } + } + } + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Deleting Qovery managed namespaces".to_string()), + )); + + for qovery_namespace in qovery_namespaces.iter() { + let deletion = cmd::kubectl::kubectl_exec_delete_namespace( + &kubernetes_config_file_path, + qovery_namespace, + kubernetes.cloud_provider().credentials_environment_variables(), + ); + match deletion { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Namespace {} is fully deleted", qovery_namespace)), + )), + Err(e) => { + if !(e.message(ErrorMessageVerbosity::FullDetails).contains("not found")) { + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new_from_safe(format!("Can't delete namespace {}.", qovery_namespace)), + )) + } + } + } + } + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Delete all remaining deployed helm applications".to_string()), + )); + + match helm.list_release(None, &[]) { + Ok(helm_charts) => { + for chart in helm_charts { + let chart_info = 
ChartInfo::new_from_release_name(&chart.name, &chart.namespace); + match helm.uninstall(&chart_info, &[]) { + Ok(_) => kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), + )), + Err(e) => { + let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe, Some(e.to_string())), + )) + } + } + } + } + Err(e) => { + let message_safe = "Unable to get helm list"; + kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(message_safe.to_string(), Some(e.to_string())), + )) + } + } + }; + + let message = format!("Deleting Kubernetes cluster {}/{}", kubernetes.name(), kubernetes.id()); + kubernetes.send_to_customer(&message, &listeners_helper); + kubernetes + .logger() + .log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe(message))); + + kubernetes.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Running Terraform destroy".to_string()), + )); + + match retry::retry( + Fibonacci::from_millis(60000).take(3), + || match cmd::terraform::terraform_init_validate_destroy(temp_dir.as_str(), false) { + Ok(_) => OperationResult::Ok(()), + Err(e) => OperationResult::Retry(e), + }, + ) { + Ok(_) => { + kubernetes.send_to_customer( + format!( + "Kubernetes cluster {}/{} successfully deleted", + kubernetes.name(), + kubernetes.id() + ) + .as_str(), + &listeners_helper, + ); + kubernetes.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe("Kubernetes cluster successfully deleted".to_string()), + )); + Ok(()) + } + Err(Operation { error, .. 
}) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + error, + )), + Err(retry::Error::Internal(msg)) => Err(EngineError::new_terraform_error_while_executing_destroy_pipeline( + event_details, + CommandError::new("Error while trying to perform Terraform destroy".to_string(), Some(msg), None), + )), + } +} + +fn delete_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { + kubernetes.logger().log(EngineEvent::Warning( + kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)), + EventMessage::new_from_safe(format!("{}.delete_error() called.", kubernetes.kind())), + )); + + Ok(()) +} diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 36fb944c..8a692f45 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -5,6 +5,7 @@ use const_format::formatcp; use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; use qovery_engine::cloud_provider::Kind::Aws; @@ -60,16 +61,19 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> &context, logger, AWS_TEST_REGION.to_string().as_str(), + KKind::Eks, AWS_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, ) } + impl Cluster for AWS { fn docker_cr_engine( context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -84,10 +88,11 @@ impl Cluster for AWS { let cloud_provider: Arc> = Arc::new(AWS::cloud_provider(context)); let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); - let k = get_environment_test_kubernetes( + let kubernetes = 
get_environment_test_kubernetes( Aws, context, cloud_provider.clone(), + kubernetes_kind, dns_provider.clone(), logger.clone(), localisation, @@ -101,7 +106,7 @@ impl Cluster for AWS { container_registry, cloud_provider, dns_provider, - k, + kubernetes, ) } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 7c2b440f..2b639604 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -20,17 +20,18 @@ use crate::utilities::{ FuncTestsSecrets, }; use base64; -use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EKS}; +use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EC2, EKS}; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; use qovery_engine::cloud_provider::environment::Environment; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::{CloudProvider, Edge, Kind}; +use qovery_engine::cloud_provider::{CloudProvider, Kind}; use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas; use qovery_engine::cmd::structs::SVCItem; use qovery_engine::engine::EngineConfig; @@ -62,6 +63,7 @@ pub trait Cluster { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -1136,14 +1138,14 @@ pub fn test_db( Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), Kind::Scw => (SCW_TEST_ZONE.to_string(), 
SCW_KUBERNETES_VERSION.to_string()), - Kind::Edge(Edge::Aws) => (AWS_TEST_REGION.to_string(), AWS_K3S_VERSION.to_string()), }; let engine_config = match provider_kind { - Kind::Aws | Kind::Edge(Edge::Aws) => AWS::docker_cr_engine( + Kind::Aws => AWS::docker_cr_engine( &context, logger.clone(), localisation.as_str(), + KKind::Eks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1152,6 +1154,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), + KKind::Doks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1160,6 +1163,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), + KKind::ScwKapsule, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1224,6 +1228,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::Eks, kubernetes_version, &ClusterDomain::Default, None, @@ -1232,6 +1237,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::Doks, kubernetes_version, &ClusterDomain::Default, None, @@ -1240,6 +1246,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), + KKind::ScwKapsule, kubernetes_version, &ClusterDomain::Default, None, @@ -1256,6 +1263,7 @@ pub fn get_environment_test_kubernetes<'a>( provider_kind: Kind, context: &Context, cloud_provider: Arc>, + kubernetes_kind: KKind, dns_provider: Arc>, logger: Box, localisation: &str, @@ -1263,16 +1271,16 @@ pub fn get_environment_test_kubernetes<'a>( vpc_network_mode: Option, ) -> Box { let secrets = FuncTestsSecrets::new(); - let k: Box; - match provider_kind { - Kind::Aws => { + let kubernetes: Box = match kubernetes_kind { + KKind::Eks => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); if vpc_network_mode.is_some() { options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); } - k = Box::new( + + Box::new( EKS::new( 
context.clone(), context.cluster_id(), @@ -1288,11 +1296,35 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - Kind::Do => { + KKind::Ec2 => { + let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); + let mut options = AWS::kubernetes_cluster_options(secrets, None); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } + + Box::new( + EC2::new( + context.clone(), + context.cluster_id(), + uuid::Uuid::new_v4(), + format!("qovery-{}", context.cluster_id()).as_str(), + kubernetes_version, + region.clone(), + region.get_zones_to_string(), + cloud_provider, + dns_provider, + options, + logger, + ) + .unwrap(), + ) + } + KKind::Doks => { let region = DoRegion::from_str(localisation).expect("DO region not supported"); - k = Box::new( + Box::new( DOKS::new( context.clone(), context.cluster_id().to_string(), @@ -1307,11 +1339,11 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - Kind::Scw => { + KKind::ScwKapsule => { let zone = ScwZone::from_str(localisation).expect("SCW zone not supported"); - k = Box::new( + Box::new( Kapsule::new( context.clone(), context.cluster_id().to_string(), @@ -1326,11 +1358,11 @@ pub fn get_environment_test_kubernetes<'a>( logger, ) .unwrap(), - ); + ) } - } + }; - return k; + return kubernetes; } pub fn get_cluster_test_kubernetes<'a>( @@ -1419,6 +1451,7 @@ pub fn get_cluster_test_kubernetes<'a>( pub fn cluster_test( test_name: &str, provider_kind: Kind, + kubernetes_kind: KKind, context: Context, logger: Box, localisation: &str, @@ -1441,6 +1474,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1449,6 +1483,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1457,6 +1492,7 @@ pub fn cluster_test( 
&context, logger.clone(), localisation, + kubernetes_kind, boot_version, cluster_domain, vpc_network_mode.clone(), @@ -1547,6 +1583,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::Eks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1555,6 +1592,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::Doks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1563,6 +1601,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, + KKind::ScwKapsule, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 36a5db93..e81e336f 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -3,6 +3,7 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; use qovery_engine::cloud_provider::digitalocean::DO; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; @@ -13,6 +14,7 @@ use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; +use qovery_engine::cloud_provider::kubernetes::Kind; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; @@ -48,6 +50,7 @@ pub fn do_default_engine_config(context: &Context, logger: Box) -> E &context, logger, DO_TEST_REGION.to_string().as_str(), + KKind::Doks, DO_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, 
None, @@ -59,6 +62,7 @@ impl Cluster for DO { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -76,6 +80,7 @@ impl Cluster for DO { Do, context, cloud_provider.clone(), + kubernetes_kind, dns_provider.clone(), logger.clone(), localisation, diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index d3c570bf..4db9b9ff 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -1,28 +1,31 @@ -use const_format::formatcp; -use qovery_engine::build_platform::Build; -use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; -use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; -use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; -use qovery_engine::engine::EngineConfig; -use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; -use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use std::sync::Arc; -use crate::cloudflare::dns_provider_cloudflare; -use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; +use const_format::formatcp; +use tracing::error; -use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use qovery_engine::build_platform::Build; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use qovery_engine::cloud_provider::kubernetes::Kind; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; +use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; +use qovery_engine::cloud_provider::scaleway::Scaleway; use qovery_engine::cloud_provider::Kind::Scw; +use 
qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::errors::ContainerRegistryError; +use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; use qovery_engine::container_registry::ContainerRegistry; use qovery_engine::dns_provider::DnsProvider; +use qovery_engine::engine::EngineConfig; +use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::logger::Logger; use qovery_engine::models::scaleway::ScwZone; -use tracing::error; +use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; + +use crate::cloudflare::dns_provider_cloudflare; +use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; pub const SCW_KUBERNETES_MAJOR_VERSION: u8 = 1; @@ -69,6 +72,7 @@ pub fn scw_default_engine_config(context: &Context, logger: Box) -> &context, logger, SCW_TEST_ZONE.to_string().as_str(), + KKind::ScwKapsule, SCW_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, @@ -80,6 +84,7 @@ impl Cluster for Scaleway { context: &Context, logger: Box, localisation: &str, + kubernetes_kind: KKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -98,6 +103,7 @@ impl Cluster for Scaleway { Scw, context, cloud_provider.clone(), + Kind::ScwKapsule, dns_provider.clone(), logger.clone(), localisation, diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index 53f790b8..e403f598 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -1,14 +1,18 @@ extern crate test_utilities; -use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; -use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; +use 
std::str::FromStr; + use ::function_name::named; +use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; -use std::str::FromStr; -use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + +use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; +use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; #[cfg(feature = "test-aws-infra")] fn create_and_destroy_eks_cluster( @@ -25,6 +29,7 @@ fn create_and_destroy_eks_cluster( cluster_test( test_name, Kind::Aws, + KKind::Eks, context( generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str(), diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index 9dbf76d3..754072bc 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -1,6 +1,7 @@ use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::WithNatGateways; use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; use test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; @@ -38,6 +39,7 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { cluster_test( function_name!(), Kind::Aws, + KKind::Eks, context.clone(), logger(), region, diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/edge/aws/edge_aws_kubernetes.rs index fbfdacb4..a46c9368 100644 --- a/tests/edge/aws/edge_aws_kubernetes.rs +++ 
b/tests/edge/aws/edge_aws_kubernetes.rs @@ -3,6 +3,8 @@ extern crate test_utilities; use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; + use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; @@ -27,6 +29,7 @@ fn create_and_destroy_edge_aws_cluster( cluster_test( test_name, Kind::Aws, + KKind::Ec2, context( generate_id().as_str(), generate_cluster_id(region.to_string().as_str()).as_str(), diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 952cc24d..5f939656 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -4,6 +4,8 @@ use self::test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETE use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; + use qovery_engine::cloud_provider::Kind; use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; @@ -21,6 +23,7 @@ fn create_and_destroy_kapsule_cluster( cluster_test( test_name, Kind::Scw, + KKind::ScwKapsule, context(generate_id().as_str(), generate_cluster_id(zone.as_str()).as_str()), logger(), zone.as_str(), diff --git a/tests/scaleway/scw_whole_enchilada.rs b/tests/scaleway/scw_whole_enchilada.rs index bef7262d..f50f2d96 100644 --- a/tests/scaleway/scw_whole_enchilada.rs +++ b/tests/scaleway/scw_whole_enchilada.rs @@ -1,7 +1,9 @@ use 
::function_name::named; +use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::Kind; use qovery_engine::models::scaleway::ScwZone; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; + use test_utilities::scaleway::{SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger, FuncTestsSecrets}; @@ -32,6 +34,7 @@ fn create_and_destroy_kapsule_cluster_with_env_in_par_2() { cluster_test( function_name!(), Kind::Scw, + KKind::ScwKapsule, context.clone(), logger, zone.as_str(), From 259a7a609382da40033b4e12a3a18e3d183aa13a Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:13:57 +0200 Subject: [PATCH 068/122] wip: fix multi kubernetes provider tests --- test_utilities/src/common.rs | 109 +++++++++++++++++++++-------------- 1 file changed, 65 insertions(+), 44 deletions(-) diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 2b639604..8a8e857e 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -1375,21 +1375,21 @@ pub fn get_cluster_test_kubernetes<'a>( localisation: &str, aws_zones: Option>, cloud_provider: Arc>, + kubernetes_provider: KKind, dns_provider: Arc>, vpc_network_mode: Option, logger: Box, ) -> Box { - let k: Box; - - match provider_kind { - Kind::Aws => { + let kubernetes: Box = match kubernetes_provider { + KKind::Eks => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); if vpc_network_mode.is_some() { options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); - k = Box::new( + + Box::new( EKS::new( context.clone(), cluster_id.as_str(), @@ -1405,47 +1405,68 @@ pub fn get_cluster_test_kubernetes<'a>( 
logger, ) .unwrap(), - ); + ) } - Kind::Do => { - k = Box::new( - DOKS::new( - context.clone(), - cluster_id.clone(), - uuid::Uuid::new_v4(), - cluster_name.clone(), - boot_version, - DoRegion::from_str(localisation.clone()).expect("Unknown region set for DOKS"), - cloud_provider, - dns_provider, - DO::kubernetes_nodes(), - DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), - logger, - ) - .unwrap(), - ); - } - Kind::Scw => { - k = Box::new( - Kapsule::new( - context.clone(), - cluster_id.clone(), - uuid::Uuid::new_v4(), - cluster_name.clone(), - boot_version, - ScwZone::from_str(localisation.clone()).expect("Unknown zone set for Kapsule"), - cloud_provider, - dns_provider, - Scaleway::kubernetes_nodes(), - Scaleway::kubernetes_cluster_options(secrets, None), - logger, - ) - .unwrap(), - ); - } - } + KKind::Ec2 => { + let mut options = AWS::kubernetes_cluster_options(secrets, None); + let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); + if vpc_network_mode.is_some() { + options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + } + let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); - return k; + Box::new( + EC2::new( + context.clone(), + cluster_id.as_str(), + uuid::Uuid::new_v4(), + cluster_name.as_str(), + boot_version.as_str(), + aws_region.clone(), + aws_zones, + cloud_provider, + dns_provider, + options, + logger, + ) + .unwrap(), + ) + } + KKind::Doks => Box::new( + DOKS::new( + context.clone(), + cluster_id.clone(), + uuid::Uuid::new_v4(), + cluster_name.clone(), + boot_version, + DoRegion::from_str(localisation.clone()).expect("Unknown region set for DOKS"), + cloud_provider, + dns_provider, + DO::kubernetes_nodes(), + DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), + logger, + ) + .unwrap(), + ), + KKind::ScwKapsule => Box::new( + Kapsule::new( + context.clone(), + cluster_id.clone(), + uuid::Uuid::new_v4(), + 
cluster_name.clone(), + boot_version, + ScwZone::from_str(localisation.clone()).expect("Unknown zone set for Kapsule"), + cloud_provider, + dns_provider, + Scaleway::kubernetes_nodes(), + Scaleway::kubernetes_cluster_options(secrets, None), + logger, + ) + .unwrap(), + ), + }; + + return kubernetes; } pub fn cluster_test( From d29de40e269788464a300a1fa2ed5a6cc9b2f896 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Thu, 21 Apr 2022 22:19:46 +0200 Subject: [PATCH 069/122] wip: fix clippy errors --- src/cloud_provider/aws/kubernetes/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 15eff3e6..e65ed05d 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -169,7 +169,7 @@ impl EKS { } } - let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + let s3 = s3(&context, ®ion, &**cloud_provider); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -580,7 +580,7 @@ impl Kubernetes for EKS { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || downgrade()) + send_progress_on_long_task(self, Action::Create, downgrade) } #[named] @@ -799,7 +799,7 @@ impl EC2 { let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; - let s3 = s3(&context, ®ion, cloud_provider.as_ref()); + let s3 = s3(&context, ®ion, &**cloud_provider); // copy listeners from CloudProvider let listeners = cloud_provider.listeners().clone(); @@ -964,7 +964,7 @@ impl Kubernetes for EC2 { event_details, self.logger(), ); - send_progress_on_long_task(self, Action::Create, || downgrade()) + send_progress_on_long_task(self, Action::Create, downgrade) } #[named] @@ -1178,7 +1178,7 @@ fn aws_zones( Ok(aws_zones) } -fn s3(context: &Context, region: &AwsRegion, cloud_provider: &Box) -> S3 { +fn 
s3(context: &Context, region: &AwsRegion, cloud_provider: &dyn CloudProvider) -> S3 { S3::new( context.clone(), "s3-temp-id".to_string(), @@ -1229,7 +1229,7 @@ fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> Str fn tera_context( kubernetes: &dyn Kubernetes, zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::LoadConfiguration)); From ab77b8f46f4a9ee93c7aa0d9b3722009fbc6920d Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 10:48:48 +0200 Subject: [PATCH 070/122] wip: add ec2 subnets --- .../backend.j2.tf | 0 .../documentdb.tf | 0 .../eks-vpc-common.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 0 .../elasticcache.tf | 0 .../elasticsearch.tf | 0 .../qovery-vault.j2.tf | 0 lib/aws/{bootstrap => bootstrap-ec2}/rds.tf | 0 .../s3-qovery-buckets.tf | 0 .../tf-default-vars.j2.tf | 0 .../tf-providers-aws.j2.tf | 0 .../{bootstrap => bootstrap-eks}/README.md | 0 lib/aws/bootstrap-eks/backend.j2.tf | 10 + .../chart_values/external-dns.j2.yaml | 0 .../chart_values/grafana.j2.yaml | 0 .../chart_values/kube-prometheus-stack.yaml | 0 .../chart_values/loki.yaml | 0 .../chart_values/metrics-server.yaml | 0 .../chart_values/nginx-ingress.yaml | 0 .../chart_values/pleco.yaml | 0 .../charts/aws-calico/.helmignore | 0 .../charts/aws-calico/Chart.yaml | 0 .../charts/aws-calico/README.md | 0 .../charts/aws-calico/crds/crds.yaml | 0 .../charts/aws-calico/templates/_helpers.tpl | 0 .../aws-calico/templates/config-map.yaml | 0 .../aws-calico/templates/daemon-set.yaml | 0 .../aws-calico/templates/deployment.yaml | 0 .../templates/pod-disruption-budget.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../charts/aws-calico/templates/rbac.yaml | 0 .../templates/service-accounts.yaml | 0 .../charts/aws-calico/templates/service.yaml | 0 .../charts/aws-calico/values.yaml | 0 
.../charts/aws-limits-exporter/.helmignore | 0 .../charts/aws-limits-exporter/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../templates/secrets.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../charts/aws-limits-exporter/values.yaml | 0 .../aws-node-termination-handler/.helmignore | 0 .../aws-node-termination-handler/Chart.yaml | 0 .../aws-node-termination-handler/README.md | 0 .../templates/_helpers.tpl | 0 .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/daemonset.yaml | 0 .../templates/psp.yaml | 0 .../templates/serviceaccount.yaml | 0 .../aws-node-termination-handler/values.yaml | 0 .../charts/aws-ui-view/.helmignore | 0 .../charts/aws-ui-view/Chart.yaml | 0 .../charts/aws-ui-view/templates/_helpers.tpl | 0 .../aws-ui-view/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../charts/aws-ui-view/values.yaml | 0 .../charts/aws-vpc-cni/.helmignore | 0 .../charts/aws-vpc-cni/Chart.yaml | 0 .../charts/aws-vpc-cni/README.md | 0 .../charts/aws-vpc-cni/templates/_helpers.tpl | 0 .../aws-vpc-cni/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../aws-vpc-cni/templates/configmap.yaml | 0 .../templates/customresourcedefinition.yaml | 0 .../aws-vpc-cni/templates/daemonset.yaml | 0 .../aws-vpc-cni/templates/eniconfig.yaml | 0 .../aws-vpc-cni/templates/serviceaccount.yaml | 0 .../charts/aws-vpc-cni/values.yaml | 0 .../charts/coredns-config/.helmignore | 0 .../charts/coredns-config/Chart.yaml | 0 .../coredns-config/templates/_helpers.tpl | 0 .../coredns-config/templates/configmap.yml | 0 .../charts/coredns-config/values.yaml | 0 .../charts/iam-eks-user-mapper/.helmignore | 0 .../charts/iam-eks-user-mapper/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../iam-eks-user-mapper/templates/rbac.yaml | 0 .../iam-eks-user-mapper/templates/secret.yaml | 0 
.../templates/serviceaccount.yaml | 0 .../charts/iam-eks-user-mapper/values.yaml | 0 .../charts/q-storageclass/.helmignore | 0 .../charts/q-storageclass/Chart.yaml | 0 .../q-storageclass/templates/_helpers.tpl | 0 .../templates/storageclass.yaml | 0 .../charts/q-storageclass/values.yaml | 0 lib/aws/bootstrap-eks/documentdb.tf | 81 +++++ .../eks-ebs-csi-driver.tf | 0 .../eks-gen-kubectl-config.j2.tf | 0 .../eks-master-cluster.j2.tf | 0 .../eks-master-iam.tf | 0 .../eks-master-sec-group.tf | 0 .../eks-s3-kubeconfig-store.tf | 0 lib/aws/bootstrap-eks/eks-vpc-common.j2.tf | 42 +++ .../eks-vpc-with-nat-gateways.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 75 ++++ .../eks-workers-iam.tf | 0 .../eks-workers-nodes.j2.tf | 0 .../eks-workers-sec-group.tf | 0 lib/aws/bootstrap-eks/elasticcache.tf | 80 +++++ lib/aws/bootstrap-eks/elasticsearch.tf | 79 +++++ .../helm-aws-iam-eks-user-mapper.tf | 0 .../helm-cluster-autoscaler.j2.tf | 0 .../helm-grafana.j2.tf | 0 .../helm-loki.j2.tf | 0 .../helm-nginx-ingress.tf | 0 .../{bootstrap => bootstrap-eks}/helper.j2.sh | 0 .../qovery-tf-config.j2.tf | 0 lib/aws/bootstrap-eks/qovery-vault.j2.tf | 29 ++ lib/aws/bootstrap-eks/rds.tf | 118 +++++++ lib/aws/bootstrap-eks/s3-qovery-buckets.tf | 44 +++ lib/aws/bootstrap-eks/tf-default-vars.j2.tf | 319 ++++++++++++++++++ lib/aws/bootstrap-eks/tf-providers-aws.j2.tf | 60 ++++ src/cloud_provider/aws/kubernetes/mod.rs | 42 ++- test_utilities/src/aws.rs | 3 + 118 files changed, 976 insertions(+), 6 deletions(-) rename lib/aws/{bootstrap => bootstrap-ec2}/backend.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/documentdb.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/eks-vpc-common.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/eks-vpc-without-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/elasticcache.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/elasticsearch.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/qovery-vault.j2.tf (100%) 
rename lib/aws/{bootstrap => bootstrap-ec2}/rds.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/s3-qovery-buckets.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/tf-default-vars.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-ec2}/tf-providers-aws.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/README.md (100%) create mode 100644 lib/aws/bootstrap-eks/backend.j2.tf rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/external-dns.j2.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/grafana.j2.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/kube-prometheus-stack.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/loki.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/metrics-server.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/nginx-ingress.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/chart_values/pleco.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/crds/crds.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/config-map.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/daemon-set.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/pod-disruption-budget.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/podsecuritypolicy.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/rbac.yaml (100%) rename lib/aws/{bootstrap => 
bootstrap-eks}/charts/aws-calico/templates/service-accounts.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/templates/service.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-calico/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/secrets.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/service.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/templates/servicemonitor.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-limits-exporter/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/psp.yaml (100%) 
rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-node-termination-handler/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-ui-view/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/README.md (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/configmap.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/customresourcedefinition.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/eniconfig.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/aws-vpc-cni/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/.helmignore (100%) rename lib/aws/{bootstrap => 
bootstrap-eks}/charts/coredns-config/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/templates/configmap.yml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/coredns-config/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/deployment.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/rbac.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/secret.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/iam-eks-user-mapper/values.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/.helmignore (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/Chart.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/templates/storageclass.yaml (100%) rename lib/aws/{bootstrap => bootstrap-eks}/charts/q-storageclass/values.yaml (100%) create mode 100644 lib/aws/bootstrap-eks/documentdb.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-ebs-csi-driver.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-gen-kubectl-config.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-cluster.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-iam.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-master-sec-group.tf (100%) rename 
lib/aws/{bootstrap => bootstrap-eks}/eks-s3-kubeconfig-store.tf (100%) create mode 100644 lib/aws/bootstrap-eks/eks-vpc-common.j2.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-vpc-with-nat-gateways.j2.tf (100%) create mode 100644 lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf rename lib/aws/{bootstrap => bootstrap-eks}/eks-workers-iam.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-workers-nodes.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/eks-workers-sec-group.tf (100%) create mode 100644 lib/aws/bootstrap-eks/elasticcache.tf create mode 100644 lib/aws/bootstrap-eks/elasticsearch.tf rename lib/aws/{bootstrap => bootstrap-eks}/helm-aws-iam-eks-user-mapper.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-cluster-autoscaler.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-grafana.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-loki.j2.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helm-nginx-ingress.tf (100%) rename lib/aws/{bootstrap => bootstrap-eks}/helper.j2.sh (100%) rename lib/aws/{bootstrap => bootstrap-eks}/qovery-tf-config.j2.tf (100%) create mode 100644 lib/aws/bootstrap-eks/qovery-vault.j2.tf create mode 100644 lib/aws/bootstrap-eks/rds.tf create mode 100644 lib/aws/bootstrap-eks/s3-qovery-buckets.tf create mode 100644 lib/aws/bootstrap-eks/tf-default-vars.j2.tf create mode 100644 lib/aws/bootstrap-eks/tf-providers-aws.j2.tf diff --git a/lib/aws/bootstrap/backend.j2.tf b/lib/aws/bootstrap-ec2/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap/backend.j2.tf rename to lib/aws/bootstrap-ec2/backend.j2.tf diff --git a/lib/aws/bootstrap/documentdb.tf b/lib/aws/bootstrap-ec2/documentdb.tf similarity index 100% rename from lib/aws/bootstrap/documentdb.tf rename to lib/aws/bootstrap-ec2/documentdb.tf diff --git a/lib/aws/bootstrap/eks-vpc-common.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-common.j2.tf 
rename to lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf diff --git a/lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf rename to lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap/elasticcache.tf b/lib/aws/bootstrap-ec2/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap/elasticcache.tf rename to lib/aws/bootstrap-ec2/elasticcache.tf diff --git a/lib/aws/bootstrap/elasticsearch.tf b/lib/aws/bootstrap-ec2/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap/elasticsearch.tf rename to lib/aws/bootstrap-ec2/elasticsearch.tf diff --git a/lib/aws/bootstrap/qovery-vault.j2.tf b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap/qovery-vault.j2.tf rename to lib/aws/bootstrap-ec2/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap/rds.tf b/lib/aws/bootstrap-ec2/rds.tf similarity index 100% rename from lib/aws/bootstrap/rds.tf rename to lib/aws/bootstrap-ec2/rds.tf diff --git a/lib/aws/bootstrap/s3-qovery-buckets.tf b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap/s3-qovery-buckets.tf rename to lib/aws/bootstrap-ec2/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap/tf-default-vars.j2.tf b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap/tf-default-vars.j2.tf rename to lib/aws/bootstrap-ec2/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap/tf-providers-aws.j2.tf b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap/tf-providers-aws.j2.tf rename to lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf diff --git a/lib/aws/bootstrap/README.md b/lib/aws/bootstrap-eks/README.md similarity index 100% rename from lib/aws/bootstrap/README.md rename to lib/aws/bootstrap-eks/README.md diff --git 
a/lib/aws/bootstrap-eks/backend.j2.tf b/lib/aws/bootstrap-eks/backend.j2.tf new file mode 100644 index 00000000..a1418800 --- /dev/null +++ b/lib/aws/bootstrap-eks/backend.j2.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + access_key = "{{ aws_access_key_tfstates_account }}" + secret_key = "{{ aws_secret_key_tfstates_account }}" + bucket = "{{ aws_terraform_backend_bucket }}" + key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" + dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}" + region = "{{ aws_region_tfstates_account }}" + } +} diff --git a/lib/aws/bootstrap/chart_values/external-dns.j2.yaml b/lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/external-dns.j2.yaml rename to lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml diff --git a/lib/aws/bootstrap/chart_values/grafana.j2.yaml b/lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/grafana.j2.yaml rename to lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml diff --git a/lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml b/lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml rename to lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml diff --git a/lib/aws/bootstrap/chart_values/loki.yaml b/lib/aws/bootstrap-eks/chart_values/loki.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/loki.yaml rename to lib/aws/bootstrap-eks/chart_values/loki.yaml diff --git a/lib/aws/bootstrap/chart_values/metrics-server.yaml b/lib/aws/bootstrap-eks/chart_values/metrics-server.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/metrics-server.yaml rename to lib/aws/bootstrap-eks/chart_values/metrics-server.yaml diff --git a/lib/aws/bootstrap/chart_values/nginx-ingress.yaml 
b/lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/nginx-ingress.yaml rename to lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml diff --git a/lib/aws/bootstrap/chart_values/pleco.yaml b/lib/aws/bootstrap-eks/chart_values/pleco.yaml similarity index 100% rename from lib/aws/bootstrap/chart_values/pleco.yaml rename to lib/aws/bootstrap-eks/chart_values/pleco.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/.helmignore b/lib/aws/bootstrap-eks/charts/aws-calico/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-calico/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/README.md b/lib/aws/bootstrap-eks/charts/aws-calico/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/README.md rename to lib/aws/bootstrap-eks/charts/aws-calico/README.md diff --git a/lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml similarity index 100% rename from 
lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml similarity index 100% rename from 
lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/service.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/templates/service.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml diff --git a/lib/aws/bootstrap/charts/aws-calico/values.yaml b/lib/aws/bootstrap-eks/charts/aws-calico/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-calico/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-calico/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml diff --git 
a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml diff --git a/lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml b/lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore rename to 
lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/README.md rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml 
b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl 
b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/values.yaml b/lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-ui-view/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/README.md rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md diff --git 
a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml diff --git 
a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml rename to lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml diff --git a/lib/aws/bootstrap/charts/coredns-config/.helmignore b/lib/aws/bootstrap-eks/charts/coredns-config/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/.helmignore rename to lib/aws/bootstrap-eks/charts/coredns-config/.helmignore diff --git a/lib/aws/bootstrap/charts/coredns-config/Chart.yaml b/lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/Chart.yaml rename to lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml diff --git a/lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml b/lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml similarity index 100% rename from 
lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml rename to lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml diff --git a/lib/aws/bootstrap/charts/coredns-config/values.yaml b/lib/aws/bootstrap-eks/charts/coredns-config/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/coredns-config/values.yaml rename to lib/aws/bootstrap-eks/charts/coredns-config/values.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml rename to 
lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml b/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml rename to lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/.helmignore b/lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/.helmignore rename to lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore diff --git a/lib/aws/bootstrap/charts/q-storageclass/Chart.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/Chart.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl b/lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl rename to lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl diff --git 
a/lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml diff --git a/lib/aws/bootstrap/charts/q-storageclass/values.yaml b/lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml similarity index 100% rename from lib/aws/bootstrap/charts/q-storageclass/values.yaml rename to lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml diff --git a/lib/aws/bootstrap-eks/documentdb.tf b/lib/aws/bootstrap-eks/documentdb.tf new file mode 100644 index 00000000..ea04fec0 --- /dev/null +++ b/lib/aws/bootstrap-eks/documentdb.tf @@ -0,0 +1,81 @@ +locals { + tags_documentdb = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "DocumentDB" + } + ) +} + +# Network + +resource "aws_subnet" "documentdb_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.documentdb_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.documentdb_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_subnet" "documentdb_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.documentdb_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_documentdb +} + +resource "aws_route_table_association" "documentdb_cluster_zone_a" { + count = length(var.documentdb_subnets_zone_a) + + subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource 
"aws_route_table_association" "documentdb_cluster_zone_b" { + count = length(var.documentdb_subnets_zone_b) + + subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "documentdb_cluster_zone_c" { + count = length(var.documentdb_subnets_zone_c) + + subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_docdb_subnet_group" "documentdb" { + description = "DocumentDB linked to ${var.kubernetes_cluster_id}" + name = "documentdb-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) + + tags = local.tags_documentdb +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "documentdb_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow DocumentDB incoming access from anywhere" + from_port = 27017 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 27017 + type = "ingress" +} diff --git a/lib/aws/bootstrap/eks-ebs-csi-driver.tf b/lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf similarity index 100% rename from lib/aws/bootstrap/eks-ebs-csi-driver.tf rename to lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf diff --git a/lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf b/lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf rename to lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf diff --git a/lib/aws/bootstrap/eks-master-cluster.j2.tf b/lib/aws/bootstrap-eks/eks-master-cluster.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-master-cluster.j2.tf rename to lib/aws/bootstrap-eks/eks-master-cluster.j2.tf diff --git a/lib/aws/bootstrap/eks-master-iam.tf b/lib/aws/bootstrap-eks/eks-master-iam.tf similarity index 100% rename from 
lib/aws/bootstrap/eks-master-iam.tf rename to lib/aws/bootstrap-eks/eks-master-iam.tf diff --git a/lib/aws/bootstrap/eks-master-sec-group.tf b/lib/aws/bootstrap-eks/eks-master-sec-group.tf similarity index 100% rename from lib/aws/bootstrap/eks-master-sec-group.tf rename to lib/aws/bootstrap-eks/eks-master-sec-group.tf diff --git a/lib/aws/bootstrap/eks-s3-kubeconfig-store.tf b/lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf similarity index 100% rename from lib/aws/bootstrap/eks-s3-kubeconfig-store.tf rename to lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf b/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf new file mode 100644 index 00000000..63b91880 --- /dev/null +++ b/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf @@ -0,0 +1,42 @@ +data "aws_availability_zones" "available" {} + +locals { + tags_eks_vpc = merge( + local.tags_common, + { + Name = "qovery-eks-workers", + "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", + "kubernetes.io/role/elb" = 1, + {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} + } + ) + + tags_eks_vpc_public = merge( + local.tags_eks_vpc, + { + "Public" = "true" + } + ) + + tags_eks_vpc_private = merge( + local.tags_eks, + { + "Public" = "false" + } + ) +} + +# VPC +resource "aws_vpc" "eks" { + cidr_block = var.vpc_cidr_block + enable_dns_hostnames = true + + tags = local.tags_eks_vpc +} + +# Internet gateway +resource "aws_internet_gateway" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + tags = local.tags_eks_vpc +} \ No newline at end of file diff --git a/lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf b/lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf rename to lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf 
b/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf new file mode 100644 index 00000000..d0174308 --- /dev/null +++ b/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf @@ -0,0 +1,75 @@ +{% if vpc_qovery_network_mode == "WithoutNatGateways" %} +# Public subnets +resource "aws_subnet" "eks_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.eks_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.eks_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_subnet" "eks_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.eks_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.eks.id + map_public_ip_on_launch = true + + tags = local.tags_eks_vpc +} + +resource "aws_route_table" "eks_cluster" { + vpc_id = aws_vpc.eks.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.eks_cluster.id + } + + // todo(pmavro): add tests for it when it will be available in the SDK + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_eks_vpc +} + +resource "aws_route_table_association" "eks_cluster_zone_a" { + count = length(var.eks_subnets_zone_a_private) + + subnet_id = aws_subnet.eks_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_b" { + count = length(var.eks_subnets_zone_b_private) + + subnet_id = aws_subnet.eks_zone_b.*.id[count.index] + route_table_id = 
aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "eks_cluster_zone_c" { + count = length(var.eks_subnets_zone_c_private) + + subnet_id = aws_subnet.eks_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} +{% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap/eks-workers-iam.tf b/lib/aws/bootstrap-eks/eks-workers-iam.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-iam.tf rename to lib/aws/bootstrap-eks/eks-workers-iam.tf diff --git a/lib/aws/bootstrap/eks-workers-nodes.j2.tf b/lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-nodes.j2.tf rename to lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf diff --git a/lib/aws/bootstrap/eks-workers-sec-group.tf b/lib/aws/bootstrap-eks/eks-workers-sec-group.tf similarity index 100% rename from lib/aws/bootstrap/eks-workers-sec-group.tf rename to lib/aws/bootstrap-eks/eks-workers-sec-group.tf diff --git a/lib/aws/bootstrap-eks/elasticcache.tf b/lib/aws/bootstrap-eks/elasticcache.tf new file mode 100644 index 00000000..44073c63 --- /dev/null +++ b/lib/aws/bootstrap-eks/elasticcache.tf @@ -0,0 +1,80 @@ +locals { + tags_elasticache = merge( + aws_eks_cluster.eks_cluster.tags, + { + "Service" = "Elasticache" + } + ) +} + +# Network + +resource "aws_subnet" "elasticache_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticache_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticache_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_subnet" "elasticache_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + availability_zone = 
var.aws_availability_zones[2] + cidr_block = var.elasticache_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticache +} + +resource "aws_route_table_association" "elasticache_cluster_zone_a" { + count = length(var.elasticache_subnets_zone_a) + + subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_b" { + count = length(var.elasticache_subnets_zone_b) + + subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticache_cluster_zone_c" { + count = length(var.elasticache_subnets_zone_c) + + subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_elasticache_subnet_group" "elasticache" { + description = "Elasticache linked to ${var.kubernetes_cluster_id}" + # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly + name = "elasticache-${aws_vpc.eks.id}" + subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) +} + +# Todo: create a bastion to avoid this + +resource "aws_security_group_rule" "elasticache_remote_access" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow Redis incoming access from anywhere" + from_port = 6379 + protocol = "tcp" + security_group_id = aws_security_group.eks_cluster_workers.id + to_port = 6379 + type = "ingress" +} diff --git a/lib/aws/bootstrap-eks/elasticsearch.tf b/lib/aws/bootstrap-eks/elasticsearch.tf new file mode 100644 index 00000000..f5e873dd --- /dev/null +++ b/lib/aws/bootstrap-eks/elasticsearch.tf @@ -0,0 +1,79 @@ +locals { + tags_elasticsearch = merge( + local.tags_eks, + { + "Service" = "Elasticsearch" + } + ) +} + +# Network + +resource "aws_subnet" "elasticsearch_zone_a" { + 
count = length(var.elasticsearch_subnets_zone_a) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.elasticsearch_subnets_zone_a[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.elasticsearch_subnets_zone_b[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_subnet" "elasticsearch_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.elasticsearch_subnets_zone_c[count.index] + vpc_id = aws_vpc.eks.id + + tags = local.tags_elasticsearch +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { + count = length(var.elasticsearch_subnets_zone_a) + + subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { + count = length(var.elasticsearch_subnets_zone_b) + + subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { + count = length(var.elasticsearch_subnets_zone_c) + + subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] + route_table_id = aws_route_table.eks_cluster.id +} + +resource "aws_security_group" "elasticsearch" { + name = "elasticsearch-${var.kubernetes_cluster_id}" + description = "Elasticsearch security group" + vpc_id = aws_vpc.eks.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + + cidr_blocks = [ + aws_vpc.eks.cidr_block + ] + } + + tags = local.tags_elasticsearch +} diff --git a/lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf b/lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf similarity index 
100% rename from lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf rename to lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf diff --git a/lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf b/lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf rename to lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf diff --git a/lib/aws/bootstrap/helm-grafana.j2.tf b/lib/aws/bootstrap-eks/helm-grafana.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-grafana.j2.tf rename to lib/aws/bootstrap-eks/helm-grafana.j2.tf diff --git a/lib/aws/bootstrap/helm-loki.j2.tf b/lib/aws/bootstrap-eks/helm-loki.j2.tf similarity index 100% rename from lib/aws/bootstrap/helm-loki.j2.tf rename to lib/aws/bootstrap-eks/helm-loki.j2.tf diff --git a/lib/aws/bootstrap/helm-nginx-ingress.tf b/lib/aws/bootstrap-eks/helm-nginx-ingress.tf similarity index 100% rename from lib/aws/bootstrap/helm-nginx-ingress.tf rename to lib/aws/bootstrap-eks/helm-nginx-ingress.tf diff --git a/lib/aws/bootstrap/helper.j2.sh b/lib/aws/bootstrap-eks/helper.j2.sh similarity index 100% rename from lib/aws/bootstrap/helper.j2.sh rename to lib/aws/bootstrap-eks/helper.j2.sh diff --git a/lib/aws/bootstrap/qovery-tf-config.j2.tf b/lib/aws/bootstrap-eks/qovery-tf-config.j2.tf similarity index 100% rename from lib/aws/bootstrap/qovery-tf-config.j2.tf rename to lib/aws/bootstrap-eks/qovery-tf-config.j2.tf diff --git a/lib/aws/bootstrap-eks/qovery-vault.j2.tf b/lib/aws/bootstrap-eks/qovery-vault.j2.tf new file mode 100644 index 00000000..b12afa38 --- /dev/null +++ b/lib/aws/bootstrap-eks/qovery-vault.j2.tf @@ -0,0 +1,29 @@ +locals { + kubeconfig_base64 = base64encode(local.kubeconfig) +} +// do not run for tests clusters to avoid uncleaned info. 
+// do not try to use count into resource, it will fails trying to connect to vault +{% if vault_auth_method != "none" and not test_cluster %} +resource "vault_generic_secret" "cluster-access" { + path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" + + data_json = <, + pub ec2_zone_b_subnet_blocks: Vec, + pub ec2_zone_c_subnet_blocks: Vec, pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, pub eks_zone_c_subnet_blocks: Vec, @@ -154,7 +156,7 @@ impl EKS { logger: Box, ) -> Result { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + let template_directory = format!("{}/aws/bootstrap-eks", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; @@ -796,7 +798,7 @@ impl EC2 { logger: Box, ) -> Result { let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + let template_directory = format!("{}/aws/bootstrap-ec2", context.lib_root_dir()); let aws_zones = aws_zones(zones, ®ion, &event_details)?; let s3 = s3(&context, ®ion, &**cloud_provider); @@ -1267,6 +1269,30 @@ fn tera_context( VpcQoveryNetworkMode::WithoutNatGateways => {} }; + let mut ec2_zone_a_subnet_blocks_private = format_ips(&options.ec2_zone_a_subnet_blocks); + let mut ec2_zone_b_subnet_blocks_private = format_ips(&options.ec2_zone_b_subnet_blocks); + let mut ec2_zone_c_subnet_blocks_private = format_ips(&options.ec2_zone_c_subnet_blocks); + + match options.vpc_qovery_network_mode { + VpcQoveryNetworkMode::WithNatGateways => { + let max_subnet_zone_a = check_odd_subnets(event_details.clone(), "a", &ec2_zone_a_subnet_blocks_private)?; + let max_subnet_zone_b = check_odd_subnets(event_details.clone(), "b", &ec2_zone_b_subnet_blocks_private)?; + let max_subnet_zone_c = check_odd_subnets(event_details.clone(), "c", 
&ec2_zone_c_subnet_blocks_private)?; + + let ec2_zone_a_subnet_blocks_public: Vec = + ec2_zone_a_subnet_blocks_private.drain(max_subnet_zone_a..).collect(); + let ec2_zone_b_subnet_blocks_public: Vec = + ec2_zone_b_subnet_blocks_private.drain(max_subnet_zone_b..).collect(); + let ec2_zone_c_subnet_blocks_public: Vec = + ec2_zone_c_subnet_blocks_private.drain(max_subnet_zone_c..).collect(); + + context.insert("ec2_zone_a_subnet_blocks_public", &ec2_zone_a_subnet_blocks_public); + context.insert("ec2_zone_b_subnet_blocks_public", &ec2_zone_b_subnet_blocks_public); + context.insert("ec2_zone_c_subnet_blocks_public", &ec2_zone_c_subnet_blocks_public); + } + VpcQoveryNetworkMode::WithoutNatGateways => {} + }; + context.insert("vpc_qovery_network_mode", &options.vpc_qovery_network_mode.to_string()); let rds_zone_a_subnet_blocks = format_ips(&options.rds_zone_a_subnet_blocks); @@ -1434,7 +1460,10 @@ fn tera_context( context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); - context.insert("eks_worker_nodes", &node_groups); // FIXME + context.insert("eks_worker_nodes", &node_groups); + context.insert("ec2_zone_a_subnet_blocks_private", &ec2_zone_a_subnet_blocks_private); + context.insert("ec2_zone_b_subnet_blocks_private", &ec2_zone_b_subnet_blocks_private); + context.insert("ec2_zone_c_subnet_blocks_private", &ec2_zone_c_subnet_blocks_private); context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private); context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private); context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); @@ -1792,8 +1821,8 @@ fn pause( )); } - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. 
- // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap-{type}/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap-{type}/*.tf files let bootstrap_charts_dir = format!("{}/common/bootstrap/charts", kubernetes.context().lib_root_dir()); let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); if let Err(e) = crate::template::copy_non_template_files(&bootstrap_charts_dir, common_charts_temp_dir.as_str()) { @@ -1916,6 +1945,7 @@ fn pause( kubernetes .logger() .log(EngineEvent::Info(event_details, EventMessage::new_from_safe(message))); + Ok(()) } Err(e) => Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)), diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 8a692f45..c6c5c4b1 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -154,6 +154,9 @@ impl Cluster for AWS { fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> Options { Options { + ec2_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], + ec2_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], + ec2_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], eks_zone_a_subnet_blocks: vec!["10.0.0.0/20".to_string(), "10.0.16.0/20".to_string()], eks_zone_b_subnet_blocks: vec!["10.0.32.0/20".to_string(), "10.0.48.0/20".to_string()], eks_zone_c_subnet_blocks: vec!["10.0.64.0/20".to_string(), "10.0.80.0/20".to_string()], From b1062387d67e97afb126fc99b22729b179d2d572 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 22 Apr 2022 16:41:46 +0200 Subject: [PATCH 071/122] feat: update tf config for aws ec2 --- lib/aws/bootstrap-ec2/documentdb.tf | 18 +-- lib/aws/bootstrap-ec2/ec2-sec-group.tf | 27 +++++ ...-vpc-common.j2.tf => ec2-vpc-common.j2.tf} | 25 
++-- lib/aws/bootstrap-ec2/ec2-vpc.j2.tf | 72 +++++++++++ lib/aws/bootstrap-ec2/ec2.j2.tf | 61 ++++++++++ .../eks-vpc-without-nat-gateways.j2.tf | 75 ------------ lib/aws/bootstrap-ec2/elasticcache.tf | 18 +-- lib/aws/bootstrap-ec2/elasticsearch.tf | 18 +-- lib/aws/bootstrap-ec2/qovery-vault.j2.tf | 2 +- lib/aws/bootstrap-ec2/rds.tf | 20 ++-- lib/aws/bootstrap-ec2/s3-qovery-buckets.tf | 4 +- lib/aws/bootstrap-ec2/tf-default-vars.j2.tf | 112 +++++++++--------- 12 files changed, 263 insertions(+), 189 deletions(-) create mode 100644 lib/aws/bootstrap-ec2/ec2-sec-group.tf rename lib/aws/bootstrap-ec2/{eks-vpc-common.j2.tf => ec2-vpc-common.j2.tf} (55%) create mode 100644 lib/aws/bootstrap-ec2/ec2-vpc.j2.tf create mode 100644 lib/aws/bootstrap-ec2/ec2.j2.tf delete mode 100644 lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-ec2/documentdb.tf b/lib/aws/bootstrap-ec2/documentdb.tf index ea04fec0..04ca6934 100644 --- a/lib/aws/bootstrap-ec2/documentdb.tf +++ b/lib/aws/bootstrap-ec2/documentdb.tf @@ -1,6 +1,6 @@ locals { tags_documentdb = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "DocumentDB" } @@ -14,7 +14,7 @@ resource "aws_subnet" "documentdb_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.documentdb_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -24,7 +24,7 @@ resource "aws_subnet" "documentdb_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.documentdb_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -34,7 +34,7 @@ resource "aws_subnet" "documentdb_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.documentdb_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_documentdb } @@ -43,26 +43,26 @@ resource 
"aws_route_table_association" "documentdb_cluster_zone_a" { count = length(var.documentdb_subnets_zone_a) subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "documentdb_cluster_zone_b" { count = length(var.documentdb_subnets_zone_b) subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "documentdb_cluster_zone_c" { count = length(var.documentdb_subnets_zone_c) subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_docdb_subnet_group" "documentdb" { description = "DocumentDB linked to ${var.kubernetes_cluster_id}" - name = "documentdb-${aws_vpc.eks.id}" + name = "documentdb-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) tags = local.tags_documentdb @@ -75,7 +75,7 @@ resource "aws_security_group_rule" "documentdb_remote_access" { description = "Allow DocumentDB incoming access from anywhere" from_port = 27017 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 27017 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws/bootstrap-ec2/ec2-sec-group.tf new file mode 100644 index 00000000..02cd4bfc --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2-sec-group.tf @@ -0,0 +1,27 @@ +resource "aws_security_group" "ec2_cluster" { + name = "qovery-ec2-${var.kubernetes_cluster_id}" + description = "Cluster communication with worker nodes" + vpc_id = aws_vpc.ec2.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = 
["0.0.0.0/0"] + } + + tags = local.tags_ec2 +} + +# OPTIONAL: Allow inbound traffic from your local workstation external IP +# to the Kubernetes. You will need to replace A.B.C.D below with +# your real IP. Services like icanhazip.com can help you find this. +resource "aws_security_group_rule" "cluster_ingress_workstation_https" { + cidr_blocks = var.ec2_access_cidr_blocks + description = "Allow workstation to communicate with the cluster API Server" + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.ec2_cluster.id + to_port = 443 + type = "ingress" +} diff --git a/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf b/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf similarity index 55% rename from lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf rename to lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf index 63b91880..cc8682f3 100644 --- a/lib/aws/bootstrap-ec2/eks-vpc-common.j2.tf +++ b/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf @@ -1,42 +1,35 @@ data "aws_availability_zones" "available" {} locals { - tags_eks_vpc = merge( + tags_ec2_vpc = merge( local.tags_common, { - Name = "qovery-eks-workers", + Name = "qovery-ec2-workers", "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", "kubernetes.io/role/elb" = 1, {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} } ) - tags_eks_vpc_public = merge( - local.tags_eks_vpc, + tags_ec2_vpc_public = merge( + local.tags_ec2_vpc, { "Public" = "true" } ) - - tags_eks_vpc_private = merge( - local.tags_eks, - { - "Public" = "false" - } - ) } # VPC -resource "aws_vpc" "eks" { +resource "aws_vpc" "ec2" { cidr_block = var.vpc_cidr_block enable_dns_hostnames = true - tags = local.tags_eks_vpc + tags = local.tags_ec2_vpc } # Internet gateway -resource "aws_internet_gateway" "eks_cluster" { - vpc_id = aws_vpc.eks.id +resource "aws_internet_gateway" "ec2_instance" { + vpc_id = aws_vpc.ec2.id - tags = local.tags_eks_vpc + tags = local.tags_ec2_vpc } \ No newline at 
end of file diff --git a/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf b/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf new file mode 100644 index 00000000..27915b66 --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf @@ -0,0 +1,72 @@ +# Public subnets +resource "aws_subnet" "ec2_zone_a" { + count = length(var.ec2_subnets_zone_a_private) + + availability_zone = var.aws_availability_zones[0] + cidr_block = var.ec2_subnets_zone_a_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_subnet" "ec2_zone_b" { + count = length(var.ec2_subnets_zone_b_private) + + availability_zone = var.aws_availability_zones[1] + cidr_block = var.ec2_subnets_zone_b_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_subnet" "ec2_zone_c" { + count = length(var.ec2_subnets_zone_c_private) + + availability_zone = var.aws_availability_zones[2] + cidr_block = var.ec2_subnets_zone_c_private[count.index] + vpc_id = aws_vpc.ec2.id + map_public_ip_on_launch = true + + tags = local.tags_ec2_vpc +} + +resource "aws_route_table" "ec2_cluster" { + vpc_id = aws_vpc.ec2.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.ec2_instance.id + } + + {% for route in vpc_custom_routing_table %} + route { + cidr_block = "{{ route.destination }}" + gateway_id = "{{ route.target }}" + } + {% endfor %} + + tags = local.tags_ec2_vpc +} + +resource "aws_route_table_association" "ec2_cluster_zone_a" { + count = length(var.ec2_subnets_zone_a_private) + + subnet_id = aws_subnet.ec2_zone_a.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} + +resource "aws_route_table_association" "ec2_cluster_zone_b" { + count = length(var.ec2_subnets_zone_b_private) + + subnet_id = aws_subnet.ec2_zone_b.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} + +resource "aws_route_table_association" "ec2_cluster_zone_c" { + count = 
length(var.ec2_subnets_zone_c_private) + + subnet_id = aws_subnet.ec2_zone_c.*.id[count.index] + route_table_id = aws_route_table.ec2_cluster.id +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws/bootstrap-ec2/ec2.j2.tf new file mode 100644 index 00000000..e85ed5f9 --- /dev/null +++ b/lib/aws/bootstrap-ec2/ec2.j2.tf @@ -0,0 +1,61 @@ +data "aws_ami" "debian" { + most_recent = true + + filter { + name = "name" + values = [var.ec2_image_info.name] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + # to get owner id: + # aws ec2 describe-images --image-ids --region us-west-2 | jq -r '.Images[0].OwnerId' + owners = [var.ec2_image_info.owners] +} + +resource "aws_instance" "web" { + ami = data.aws_ami.debian.id + instance_type = var.ec2_instance.instance_type + + # disk + root_block_device { + volume_size = "30" # GiB + volume_type = "gp2" + encrypted = true + } + + # network + associate_public_ip_address = true + + # security + #vpc_security_group_ids = [aws_vpc.ec2.*.id] + + user_data = local.bootstrap + + tags = { + Name = "HelloWorld" + } +} + +locals { + bootstrap = <> /etc/profile + +while [ ! 
-f /etc/rancher/k3s/k3s.yaml ] ; do + echo "kubeconfig is not yet present, sleeping" + sleep 1 +done +s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put /etc/rancher/k3s/k3s.yaml s3://${var.s3_bucket_kubeconfig}/${var.kubernetes_cluster_id}.yaml +BOOTSTRAP +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf deleted file mode 100644 index d0174308..00000000 --- a/lib/aws/bootstrap-ec2/eks-vpc-without-nat-gateways.j2.tf +++ /dev/null @@ -1,75 +0,0 @@ -{% if vpc_qovery_network_mode == "WithoutNatGateways" %} -# Public subnets -resource "aws_subnet" "eks_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.eks_subnets_zone_a_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.eks_subnets_zone_b_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.eks_subnets_zone_c_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_route_table" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.eks_cluster.id - } - - // todo(pmavro): add tests for it when it will be available in the SDK - {% for route in vpc_custom_routing_table %} - route { - cidr_block = "{{ route.destination }}" - gateway_id = "{{ route.target }}" - } - {% endfor %} - - tags = local.tags_eks_vpc -} - 
-resource "aws_route_table_association" "eks_cluster_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - subnet_id = aws_subnet.eks_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - subnet_id = aws_subnet.eks_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - subnet_id = aws_subnet.eks_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} -{% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/elasticcache.tf b/lib/aws/bootstrap-ec2/elasticcache.tf index 44073c63..b19c3494 100644 --- a/lib/aws/bootstrap-ec2/elasticcache.tf +++ b/lib/aws/bootstrap-ec2/elasticcache.tf @@ -1,6 +1,6 @@ locals { tags_elasticache = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "Elasticache" } @@ -14,7 +14,7 @@ resource "aws_subnet" "elasticache_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.elasticache_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -24,7 +24,7 @@ resource "aws_subnet" "elasticache_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.elasticache_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -34,7 +34,7 @@ resource "aws_subnet" "elasticache_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.elasticache_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticache } @@ -43,27 +43,27 @@ resource "aws_route_table_association" "elasticache_cluster_zone_a" { count = length(var.elasticache_subnets_zone_a) subnet_id = 
aws_subnet.elasticache_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticache_cluster_zone_b" { count = length(var.elasticache_subnets_zone_b) subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticache_cluster_zone_c" { count = length(var.elasticache_subnets_zone_c) subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_elasticache_subnet_group" "elasticache" { description = "Elasticache linked to ${var.kubernetes_cluster_id}" # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly - name = "elasticache-${aws_vpc.eks.id}" + name = "elasticache-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) } @@ -74,7 +74,7 @@ resource "aws_security_group_rule" "elasticache_remote_access" { description = "Allow Redis incoming access from anywhere" from_port = 6379 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 6379 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/elasticsearch.tf b/lib/aws/bootstrap-ec2/elasticsearch.tf index f5e873dd..bbd3a685 100644 --- a/lib/aws/bootstrap-ec2/elasticsearch.tf +++ b/lib/aws/bootstrap-ec2/elasticsearch.tf @@ -1,6 +1,6 @@ locals { tags_elasticsearch = merge( - local.tags_eks, + local.tags_ec2, { "Service" = "Elasticsearch" } @@ -14,7 +14,7 @@ resource "aws_subnet" "elasticsearch_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.elasticsearch_subnets_zone_a[count.index] - vpc_id 
= aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -24,7 +24,7 @@ resource "aws_subnet" "elasticsearch_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.elasticsearch_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -34,7 +34,7 @@ resource "aws_subnet" "elasticsearch_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.elasticsearch_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_elasticsearch } @@ -43,27 +43,27 @@ resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { count = length(var.elasticsearch_subnets_zone_a) subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { count = length(var.elasticsearch_subnets_zone_b) subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { count = length(var.elasticsearch_subnets_zone_c) subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_security_group" "elasticsearch" { name = "elasticsearch-${var.kubernetes_cluster_id}" description = "Elasticsearch security group" - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id ingress { from_port = 443 @@ -71,7 +71,7 @@ resource "aws_security_group" "elasticsearch" { protocol = "tcp" cidr_blocks = [ - aws_vpc.eks.cidr_block + aws_vpc.ec2.cidr_block ] } diff --git a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf index b12afa38..9288c182 100644 --- 
a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf +++ b/lib/aws/bootstrap-ec2/qovery-vault.j2.tf @@ -23,7 +23,7 @@ resource "vault_generic_secret" "cluster-access" { EOT depends_on = [ - aws_eks_cluster.eks_cluster, + aws_ec2_cluster.ec2_cluster, ] } {% endif %} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/rds.tf b/lib/aws/bootstrap-ec2/rds.tf index 9207b0ca..dbc76ad3 100644 --- a/lib/aws/bootstrap-ec2/rds.tf +++ b/lib/aws/bootstrap-ec2/rds.tf @@ -15,7 +15,7 @@ data "aws_iam_policy_document" "rds_enhanced_monitoring" { locals { tags_rds = merge( - aws_eks_cluster.eks_cluster.tags, + aws_ec2_cluster.ec2_cluster.tags, { "Service" = "RDS" } @@ -28,7 +28,7 @@ resource "aws_subnet" "rds_zone_a" { availability_zone = var.aws_availability_zones[0] cidr_block = var.rds_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -38,7 +38,7 @@ resource "aws_subnet" "rds_zone_b" { availability_zone = var.aws_availability_zones[1] cidr_block = var.rds_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -48,7 +48,7 @@ resource "aws_subnet" "rds_zone_c" { availability_zone = var.aws_availability_zones[2] cidr_block = var.rds_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id + vpc_id = aws_vpc.ec2.id tags = local.tags_rds } @@ -57,26 +57,26 @@ resource "aws_route_table_association" "rds_cluster_zone_a" { count = length(var.rds_subnets_zone_a) subnet_id = aws_subnet.rds_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "rds_cluster_zone_b" { count = length(var.rds_subnets_zone_b) subnet_id = aws_subnet.rds_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_route_table_association" "rds_cluster_zone_c" { count = length(var.rds_subnets_zone_c) subnet_id = 
aws_subnet.rds_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id + route_table_id = aws_route_table.ec2_cluster.id } resource "aws_db_subnet_group" "rds" { description = "RDS linked to ${var.kubernetes_cluster_id}" - name = aws_vpc.eks.id + name = aws_vpc.ec2.id subnet_ids = flatten([aws_subnet.rds_zone_a.*.id, aws_subnet.rds_zone_b.*.id, aws_subnet.rds_zone_c.*.id]) tags = local.tags_rds @@ -102,7 +102,7 @@ resource "aws_security_group_rule" "postgres_remote_access" { description = "Allow RDS PostgreSQL incoming access from anywhere" from_port = 5432 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 5432 type = "ingress" } @@ -112,7 +112,7 @@ resource "aws_security_group_rule" "mysql_remote_access" { description = "Allow RDS MySQL incoming access from anywhere" from_port = 3306 protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id + security_group_id = aws_security_group.ec2_cluster_workers.id to_port = 3306 type = "ingress" } diff --git a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf index 34373142..b5680921 100644 --- a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf +++ b/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf @@ -8,7 +8,7 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" { } tags = merge( - local.tags_eks, + local.tags_ec2, { "Name" = "Kubernetes kubeconfig" } @@ -27,7 +27,7 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" { resource "aws_kms_key" "s3_kubeconfig_kms_encryption" { description = "s3 kubeconfig encryption" tags = merge( - local.tags_eks, + local.tags_ec2, { "Name" = "Kubeconfig Encryption" } diff --git a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf index b9517338..68c86897 100644 --- a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf +++ b/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf @@ -56,57 +56,84 @@ variable 
"vpc_cidr_block" { type = string } -# Kubernetes +# ec2 -variable "eks_subnets_zone_a_private" { - description = "EKS private subnets Zone A" - default = {{ eks_zone_a_subnet_blocks_private }} +variable "ec2_image_info" { + description = "EC2 image information" + default = { + "name" = "debian-10-amd64*" + "owners" = "136693071363" + } + type = map(string) +} + +variable "ec2_instance" { + description = "EC2 instance configuration" + default = { + "instance_type" = "t3.micro" + } + type = map(string) +} + +variable "k3s_config" { + description = "K3s configuration" + default = { + "version" = "v1.20.15+k3s1" + "channel" = "latest" + "exec" = "--disable=traefik" + } + type = map(string) +} + +variable "ec2_subnets_zone_a_private" { + description = "EC2 private subnets Zone A" + default = {{ ec2_zone_a_subnet_blocks_private }} type = list(string) } -variable "eks_subnets_zone_b_private" { - description = "EKS private subnets Zone B" - default = {{ eks_zone_b_subnet_blocks_private }} +variable "ec2_subnets_zone_b_private" { + description = "EC2 private subnets Zone B" + default = {{ ec2_zone_b_subnet_blocks_private }} type = list(string) } -variable "eks_subnets_zone_c_private" { - description = "EKS private subnets Zone C" - default = {{ eks_zone_c_subnet_blocks_private }} +variable "ec2_subnets_zone_c_private" { + description = "EC2 private subnets Zone C" + default = {{ ec2_zone_c_subnet_blocks_private }} type = list(string) } {% if vpc_qovery_network_mode == "WithNatGateways" %} -variable "eks_subnets_zone_a_public" { - description = "EKS public subnets Zone A" - default = {{ eks_zone_a_subnet_blocks_public }} +variable "ec2_subnets_zone_a_public" { + description = "EC2 public subnets Zone A" + default = {{ ec2_zone_a_subnet_blocks_public }} type = list(string) } -variable "eks_subnets_zone_b_public" { - description = "EKS public subnets Zone B" - default = {{ eks_zone_b_subnet_blocks_public }} +variable "ec2_subnets_zone_b_public" { + description = "EC2 public 
subnets Zone B" + default = {{ ec2_zone_b_subnet_blocks_public }} type = list(string) } -variable "eks_subnets_zone_c_public" { - description = "EKS public subnets Zone C" - default = {{ eks_zone_c_subnet_blocks_public }} +variable "ec2_subnets_zone_c_public" { + description = "EC2 public subnets Zone C" + default = {{ ec2_zone_c_subnet_blocks_public }} type = list(string) } {% endif %} -variable "eks_cidr_subnet" { - description = "EKS CIDR (x.x.x.x/CIDR)" - default = {{ eks_cidr_subnet }} +variable "ec2_cidr_subnet" { + description = "EC2 CIDR (x.x.x.x/CIDR)" + default = {{ ec2_cidr_subnet }} type = number } -variable "eks_k8s_versions" { +variable "ec2_k8s_versions" { description = "Kubernetes version" default = { - "masters": "{{ eks_masters_version }}", - "workers": "{{ eks_workers_version }}", + "masters": "{{ ec2_masters_version }}", + "workers": "{{ ec2_workers_version }}", } type = map(string) } @@ -129,18 +156,12 @@ variable "kubernetes_cluster_name" { type = string } -variable "eks_access_cidr_blocks" { +variable "ec2_access_cidr_blocks" { description = "Kubernetes CIDR Block" - default = {{ eks_access_cidr_blocks }} + default = {{ ec2_access_cidr_blocks }} type = list(string) } -variable "eks_cloudwatch_log_group" { - description = "AWS cloudwatch log group for EKS" - default = "qovery-{{ eks_cloudwatch_log_group }}" - type = string -} - # S3 bucket name variable "s3_bucket_kubeconfig" { @@ -149,23 +170,6 @@ variable "s3_bucket_kubeconfig" { type = string } -# Engine info - -variable "qovery_engine_info" { - description = "Qovery engine info" - default = { - "token" = "{{ engine_version_controller_token }}" - "api_fqdn" = "{{ qovery_api_url }}" - } - type = map(string) -} - -variable "qovery_engine_replicas" { - description = "This variable is used to get random ID generated for the engine" - default = "2" - type = number -} - # Agent info variable "qovery_agent_info" { @@ -287,14 +291,6 @@ variable "elasticsearch_cidr_subnet" { type = number } -# Helm 
alert manager discord - -variable "discord_api_key" { - description = "discord url with token for used for alerting" - default = "{{ discord_api_key }}" - type = string -} - # Qovery features variable "log_history_enabled" { From b5d71b2abb7988ea9c6023b3a50afb85b0305bb4 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Fri, 22 Apr 2022 19:58:06 +0200 Subject: [PATCH 072/122] feat: add security rules to aws EC2 --- lib/aws/bootstrap-ec2/ec2-sec-group.tf | 19 +++++++++++++------ lib/aws/bootstrap-ec2/ec2.j2.tf | 4 +++- lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws/bootstrap-ec2/ec2-sec-group.tf index 02cd4bfc..a82bd0e9 100644 --- a/lib/aws/bootstrap-ec2/ec2-sec-group.tf +++ b/lib/aws/bootstrap-ec2/ec2-sec-group.tf @@ -13,15 +13,22 @@ resource "aws_security_group" "ec2_cluster" { tags = local.tags_ec2 } -# OPTIONAL: Allow inbound traffic from your local workstation external IP -# to the Kubernetes. You will need to replace A.B.C.D below with -# your real IP. Services like icanhazip.com can help you find this. 
-resource "aws_security_group_rule" "cluster_ingress_workstation_https" { - cidr_blocks = var.ec2_access_cidr_blocks - description = "Allow workstation to communicate with the cluster API Server" +resource "aws_security_group_rule" "https" { + cidr_blocks = "0.0.0.0/0" + description = "HTTPS connectivity" from_port = 443 protocol = "tcp" security_group_id = aws_security_group.ec2_cluster.id to_port = 443 type = "ingress" } + +resource "aws_security_group_rule" "ssh" { + cidr_blocks = "0.0.0.0/0" + description = "SSH remote access" + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.ec2_cluster.id + to_port = 22 + type = "ssh" +} \ No newline at end of file diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws/bootstrap-ec2/ec2.j2.tf index e85ed5f9..2a9bb030 100644 --- a/lib/aws/bootstrap-ec2/ec2.j2.tf +++ b/lib/aws/bootstrap-ec2/ec2.j2.tf @@ -31,7 +31,9 @@ resource "aws_instance" "web" { associate_public_ip_address = true # security - #vpc_security_group_ids = [aws_vpc.ec2.*.id] + vpc_security_group_ids = [aws_vpc.ec2.id] + subnet_id = aws_subnet.ec2_zone_a.id + security_groups = [aws_security_group.ec2_cluster.id] user_data = local.bootstrap diff --git a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf index c4612160..e5235b07 100644 --- a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf +++ b/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.66.0" + version = "~> 4.11.0" } external = { source = "hashicorp/external" From 039a45e848323aa825d09d1c1c817f64d7d881f5 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 21:06:25 +0200 Subject: [PATCH 073/122] wip: refactor according to Benjamin C. 
feedback --- .../bootstrap}/backend.j2.tf | 0 .../bootstrap}/documentdb.tf | 0 .../bootstrap}/ec2-sec-group.tf | 0 .../bootstrap}/ec2-vpc-common.j2.tf | 0 .../bootstrap}/ec2-vpc.j2.tf | 0 .../bootstrap}/ec2.j2.tf | 0 .../bootstrap}/elasticcache.tf | 0 .../bootstrap}/elasticsearch.tf | 0 .../bootstrap}/qovery-vault.j2.tf | 0 .../bootstrap}/rds.tf | 0 .../bootstrap}/s3-qovery-buckets.tf | 0 .../bootstrap}/tf-default-vars.j2.tf | 0 .../bootstrap}/tf-providers-aws.j2.tf | 0 .../chart_values/mongodb/q-values.j2.yaml | 546 +++++++++ .../chart_values/mysql/q-values.j2.yaml | 603 ++++++++++ .../nginx-ingress/nginx-ingress.j2.yaml | 572 +++++++++ .../chart_values/postgresql/q-values.j2.yaml | 568 +++++++++ .../chart_values/redis/q-values.j2.yaml | 788 ++++++++++++ .../charts/q-application}/.helmignore | 0 .../charts/q-application/Chart.j2.yaml | 7 + .../templates/deployment.j2.yaml | 92 ++ .../templates/horizontal_autoscaler.j2.yaml | 19 + .../templates/networkpolicies.j2.yaml | 95 ++ .../q-application/templates/pdb.j2.yaml | 21 + .../q-application/templates/secret.j2.yaml | 17 + .../q-application/templates/service.j2.yaml | 26 + .../templates/statefulset.j2.yaml | 132 +++ .../charts/q-application/values.j2.yaml | 2 + .../charts/q-ingress-tls}/.helmignore | 0 .../charts/q-ingress-tls/Chart.j2.yaml | 6 + .../templates/cert-issuer.j2.yaml | 20 + .../templates/ingress-qovery.j2.yaml | 69 ++ .../charts/q-ingress-tls/values.j2.yaml | 2 + lib/aws-ec2/services/common/backend.j2.tf | 21 + .../services/common/common-variables.j2.tf | 167 +++ lib/aws-ec2/services/common/providers.j2.tf | 52 + lib/aws-ec2/services/mongodb/local-vars.j2.tf | 6 + lib/aws-ec2/services/mongodb/main.j2.tf | 114 ++ lib/aws-ec2/services/mongodb/variables.j2.tf | 43 + lib/aws-ec2/services/mysql/local-vars.j2.tf | 6 + lib/aws-ec2/services/mysql/main.j2.tf | 132 +++ lib/aws-ec2/services/mysql/variables.j2.tf | 67 ++ .../services/postgresql/local-vars.j2.tf | 6 + lib/aws-ec2/services/postgresql/main.j2.tf | 121 
++ .../services/postgresql/variables.j2.tf | 61 + lib/aws-ec2/services/redis/local-vars.j2.tf | 7 + lib/aws-ec2/services/redis/main.j2.tf | 114 ++ lib/aws-ec2/services/redis/variables.j2.tf | 37 + .../{bootstrap-eks => bootstrap}/README.md | 0 .../backend.j2.tf | 0 .../chart_values/external-dns.j2.yaml | 0 .../chart_values/grafana.j2.yaml | 0 .../chart_values/kube-prometheus-stack.yaml | 0 .../chart_values/loki.yaml | 0 .../chart_values/metrics-server.yaml | 0 .../chart_values/nginx-ingress.yaml | 0 .../chart_values/pleco.yaml | 0 .../charts/aws-calico/.helmignore | 0 .../charts/aws-calico/Chart.yaml | 0 .../charts/aws-calico/README.md | 0 .../charts/aws-calico/crds/crds.yaml | 0 .../charts/aws-calico/templates/_helpers.tpl | 0 .../aws-calico/templates/config-map.yaml | 0 .../aws-calico/templates/daemon-set.yaml | 0 .../aws-calico/templates/deployment.yaml | 0 .../templates/pod-disruption-budget.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../charts/aws-calico/templates/rbac.yaml | 0 .../templates/service-accounts.yaml | 0 .../charts/aws-calico/templates/service.yaml | 0 .../charts/aws-calico/values.yaml | 0 .../charts/aws-limits-exporter/.helmignore | 0 .../charts/aws-limits-exporter/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../templates/secrets.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../charts/aws-limits-exporter/values.yaml | 0 .../aws-node-termination-handler}/.helmignore | 0 .../aws-node-termination-handler/Chart.yaml | 0 .../aws-node-termination-handler/README.md | 0 .../templates/_helpers.tpl | 0 .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/daemonset.yaml | 0 .../templates/psp.yaml | 0 .../templates/serviceaccount.yaml | 0 .../aws-node-termination-handler/values.yaml | 0 .../bootstrap/charts/aws-ui-view/.helmignore | 22 + .../charts/aws-ui-view/Chart.yaml | 0 
.../charts/aws-ui-view/templates/_helpers.tpl | 0 .../aws-ui-view/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../charts/aws-ui-view/values.yaml | 0 .../bootstrap/charts/aws-vpc-cni/.helmignore | 22 + .../charts/aws-vpc-cni/Chart.yaml | 0 .../charts/aws-vpc-cni/README.md | 0 .../charts/aws-vpc-cni/templates/_helpers.tpl | 0 .../aws-vpc-cni/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../aws-vpc-cni/templates/configmap.yaml | 0 .../templates/customresourcedefinition.yaml | 0 .../aws-vpc-cni/templates/daemonset.yaml | 0 .../aws-vpc-cni/templates/eniconfig.yaml | 0 .../aws-vpc-cni/templates/serviceaccount.yaml | 0 .../charts/aws-vpc-cni/values.yaml | 0 .../charts/coredns-config/.helmignore | 0 .../charts/coredns-config/Chart.yaml | 0 .../coredns-config/templates/_helpers.tpl | 0 .../coredns-config/templates/configmap.yml | 0 .../charts/coredns-config/values.yaml | 0 .../charts/iam-eks-user-mapper/.helmignore | 0 .../charts/iam-eks-user-mapper/Chart.yaml | 0 .../templates/_helpers.tpl | 0 .../templates/deployment.yaml | 0 .../iam-eks-user-mapper/templates/rbac.yaml | 0 .../iam-eks-user-mapper/templates/secret.yaml | 0 .../templates/serviceaccount.yaml | 0 .../charts/iam-eks-user-mapper/values.yaml | 0 .../charts/q-storageclass/.helmignore | 0 .../charts/q-storageclass/Chart.yaml | 0 .../q-storageclass/templates/_helpers.tpl | 0 .../templates/storageclass.yaml | 0 .../charts/q-storageclass/values.yaml | 0 .../documentdb.tf | 0 .../eks-ebs-csi-driver.tf | 0 .../eks-gen-kubectl-config.j2.tf | 0 .../eks-master-cluster.j2.tf | 0 .../eks-master-iam.tf | 0 .../eks-master-sec-group.tf | 0 .../eks-s3-kubeconfig-store.tf | 0 .../eks-vpc-common.j2.tf | 0 .../eks-vpc-with-nat-gateways.j2.tf | 0 .../eks-vpc-without-nat-gateways.j2.tf | 0 .../eks-workers-iam.tf | 0 .../eks-workers-nodes.j2.tf | 0 .../eks-workers-sec-group.tf | 0 .../elasticcache.tf | 0 .../elasticsearch.tf | 0 .../helm-aws-iam-eks-user-mapper.tf | 0 
.../helm-cluster-autoscaler.j2.tf | 0 .../helm-grafana.j2.tf | 0 .../helm-loki.j2.tf | 0 .../helm-nginx-ingress.tf | 0 .../{bootstrap-eks => bootstrap}/helper.j2.sh | 0 .../qovery-tf-config.j2.tf | 0 .../qovery-vault.j2.tf | 0 lib/aws/{bootstrap-eks => bootstrap}/rds.tf | 0 .../s3-qovery-buckets.tf | 0 .../tf-default-vars.j2.tf | 0 .../tf-providers-aws.j2.tf | 0 lib/edge/aws/backend.j2.tf | 10 - lib/edge/aws/documentdb.tf | 81 -- lib/edge/aws/eks-vpc-common.j2.tf | 42 - .../aws/eks-vpc-without-nat-gateways.j2.tf | 75 -- lib/edge/aws/elasticcache.tf | 80 -- lib/edge/aws/elasticsearch.tf | 79 -- lib/edge/aws/qovery-vault.j2.tf | 29 - lib/edge/aws/rds.tf | 118 -- lib/edge/aws/s3-qovery-buckets.tf | 44 - lib/edge/aws/tf-default-vars.j2.tf | 319 ----- lib/edge/aws/tf-providers-aws.j2.tf | 60 - src/cloud_provider/aws/kubernetes/ec2.rs | 390 ++++++ src/cloud_provider/aws/kubernetes/eks.rs | 670 +++++++++++ src/cloud_provider/aws/kubernetes/mod.rs | 1054 +---------------- test_utilities/src/common.rs | 4 +- 168 files changed, 5657 insertions(+), 1981 deletions(-) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/backend.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/documentdb.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-sec-group.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-vpc-common.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2-vpc.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/ec2.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/elasticcache.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/elasticsearch.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/qovery-vault.j2.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/rds.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/s3-qovery-buckets.tf (100%) rename lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/tf-default-vars.j2.tf (100%) rename 
lib/{aws/bootstrap-ec2 => aws-ec2/bootstrap}/tf-providers-aws.j2.tf (100%) create mode 100644 lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/mysql/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml create mode 100644 lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml create mode 100644 lib/aws-ec2/chart_values/redis/q-values.j2.yaml rename lib/{aws/bootstrap-eks/charts/aws-node-termination-handler => aws-ec2/charts/q-application}/.helmignore (100%) create mode 100644 lib/aws-ec2/charts/q-application/Chart.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/secret.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/service.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml create mode 100644 lib/aws-ec2/charts/q-application/values.j2.yaml rename lib/{aws/bootstrap-eks/charts/aws-ui-view => aws-ec2/charts/q-ingress-tls}/.helmignore (100%) create mode 100644 lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml create mode 100644 lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml create mode 100644 lib/aws-ec2/services/common/backend.j2.tf create mode 100644 lib/aws-ec2/services/common/common-variables.j2.tf create mode 100644 lib/aws-ec2/services/common/providers.j2.tf create mode 100644 lib/aws-ec2/services/mongodb/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/mongodb/main.j2.tf create mode 100644 
lib/aws-ec2/services/mongodb/variables.j2.tf create mode 100644 lib/aws-ec2/services/mysql/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/mysql/main.j2.tf create mode 100644 lib/aws-ec2/services/mysql/variables.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/main.j2.tf create mode 100644 lib/aws-ec2/services/postgresql/variables.j2.tf create mode 100644 lib/aws-ec2/services/redis/local-vars.j2.tf create mode 100644 lib/aws-ec2/services/redis/main.j2.tf create mode 100644 lib/aws-ec2/services/redis/variables.j2.tf rename lib/aws/{bootstrap-eks => bootstrap}/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/backend.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/external-dns.j2.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/grafana.j2.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/kube-prometheus-stack.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/loki.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/metrics-server.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/nginx-ingress.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/chart_values/pleco.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/crds/crds.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/config-map.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/daemon-set.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/deployment.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/pod-disruption-budget.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/podsecuritypolicy.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/rbac.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/service-accounts.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/templates/service.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-calico/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/deployment.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/secrets.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/service.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/templates/servicemonitor.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-limits-exporter/values.yaml (100%) rename lib/aws/{bootstrap-eks/charts/aws-vpc-cni => bootstrap/charts/aws-node-termination-handler}/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/clusterrole.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/psp.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-node-termination-handler/values.yaml (100%) create mode 100644 lib/aws/bootstrap/charts/aws-ui-view/.helmignore rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-ui-view/values.yaml (100%) create mode 100644 lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/README.md (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/clusterrole.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/clusterrolebinding.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/configmap.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/customresourcedefinition.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/daemonset.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/eniconfig.yaml 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/aws-vpc-cni/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/templates/configmap.yml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/coredns-config/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/deployment.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/rbac.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/secret.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/templates/serviceaccount.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/iam-eks-user-mapper/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/.helmignore (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/Chart.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/templates/_helpers.tpl (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/templates/storageclass.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/charts/q-storageclass/values.yaml (100%) rename lib/aws/{bootstrap-eks => bootstrap}/documentdb.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-ebs-csi-driver.tf 
(100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-gen-kubectl-config.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-cluster.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-iam.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-master-sec-group.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-s3-kubeconfig-store.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-common.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-with-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-vpc-without-nat-gateways.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-iam.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-nodes.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/eks-workers-sec-group.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/elasticcache.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/elasticsearch.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-aws-iam-eks-user-mapper.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-cluster-autoscaler.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-grafana.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-loki.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helm-nginx-ingress.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/helper.j2.sh (100%) rename lib/aws/{bootstrap-eks => bootstrap}/qovery-tf-config.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/qovery-vault.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/rds.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/s3-qovery-buckets.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/tf-default-vars.j2.tf (100%) rename lib/aws/{bootstrap-eks => bootstrap}/tf-providers-aws.j2.tf (100%) delete mode 100644 lib/edge/aws/backend.j2.tf delete mode 100644 lib/edge/aws/documentdb.tf delete mode 100644 lib/edge/aws/eks-vpc-common.j2.tf delete mode 
100644 lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf delete mode 100644 lib/edge/aws/elasticcache.tf delete mode 100644 lib/edge/aws/elasticsearch.tf delete mode 100644 lib/edge/aws/qovery-vault.j2.tf delete mode 100644 lib/edge/aws/rds.tf delete mode 100644 lib/edge/aws/s3-qovery-buckets.tf delete mode 100644 lib/edge/aws/tf-default-vars.j2.tf delete mode 100644 lib/edge/aws/tf-providers-aws.j2.tf create mode 100644 src/cloud_provider/aws/kubernetes/ec2.rs create mode 100644 src/cloud_provider/aws/kubernetes/eks.rs diff --git a/lib/aws/bootstrap-ec2/backend.j2.tf b/lib/aws-ec2/bootstrap/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/backend.j2.tf rename to lib/aws-ec2/bootstrap/backend.j2.tf diff --git a/lib/aws/bootstrap-ec2/documentdb.tf b/lib/aws-ec2/bootstrap/documentdb.tf similarity index 100% rename from lib/aws/bootstrap-ec2/documentdb.tf rename to lib/aws-ec2/bootstrap/documentdb.tf diff --git a/lib/aws/bootstrap-ec2/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-sec-group.tf rename to lib/aws-ec2/bootstrap/ec2-sec-group.tf diff --git a/lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf b/lib/aws-ec2/bootstrap/ec2-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-vpc-common.j2.tf rename to lib/aws-ec2/bootstrap/ec2-vpc-common.j2.tf diff --git a/lib/aws/bootstrap-ec2/ec2-vpc.j2.tf b/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2-vpc.j2.tf rename to lib/aws-ec2/bootstrap/ec2-vpc.j2.tf diff --git a/lib/aws/bootstrap-ec2/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/ec2.j2.tf rename to lib/aws-ec2/bootstrap/ec2.j2.tf diff --git a/lib/aws/bootstrap-ec2/elasticcache.tf b/lib/aws-ec2/bootstrap/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap-ec2/elasticcache.tf rename to lib/aws-ec2/bootstrap/elasticcache.tf diff --git 
a/lib/aws/bootstrap-ec2/elasticsearch.tf b/lib/aws-ec2/bootstrap/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap-ec2/elasticsearch.tf rename to lib/aws-ec2/bootstrap/elasticsearch.tf diff --git a/lib/aws/bootstrap-ec2/qovery-vault.j2.tf b/lib/aws-ec2/bootstrap/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/qovery-vault.j2.tf rename to lib/aws-ec2/bootstrap/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap-ec2/rds.tf b/lib/aws-ec2/bootstrap/rds.tf similarity index 100% rename from lib/aws/bootstrap-ec2/rds.tf rename to lib/aws-ec2/bootstrap/rds.tf diff --git a/lib/aws/bootstrap-ec2/s3-qovery-buckets.tf b/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap-ec2/s3-qovery-buckets.tf rename to lib/aws-ec2/bootstrap/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap-ec2/tf-default-vars.j2.tf b/lib/aws-ec2/bootstrap/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/tf-default-vars.j2.tf rename to lib/aws-ec2/bootstrap/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf b/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap-ec2/tf-providers-aws.j2.tf rename to lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf diff --git a/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml b/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml new file mode 100644 index 00000000..9f001e92 --- /dev/null +++ b/lib/aws-ec2/chart_values/mongodb/q-values.j2.yaml @@ -0,0 +1,546 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass +## Override the namespace for resource deployed by the chart, but 
can itself be overridden by the local namespaceOverride +# namespaceOverride: my-global-namespace + +image: + ## Bitnami MongoDB registry + ## + registry: quay.io + ## Bitnami MongoDB image name + ## + repository: bitnami/mongodb + ## Bitnami MongoDB image tag + ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ + ## + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns on Bitnami debugging in minideb-extras-base + ## ref: https://github.com/bitnami/minideb-extras-base + debug: true + +## String to partially override mongodb.fullname template (will maintain the release name) +## +# nameOverride: +nameOverride: {{ sanitized_name }} + +## String to fully override mongodb.fullname template +## +# fullnameOverride: +fullnameOverride: {{ sanitized_name }} + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +# Add custom extra environment variables to all the MongoDB containers +# extraEnvVars: + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: + limits: {} + requests: + cpu: "{{ total_cpus }}" + memory: "{{ total_ram_in_mib }}Mi" + +## Enable authentication +## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ +# +usePassword: true +# existingSecret: name-of-existing-secret + +## MongoDB admin password +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run +## +mongodbRootPassword: '{{ database_password }}' + +## MongoDB custom user and database +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run +## +mongodbUsername: '{{ database_login }}' +mongodbPassword: '{{ database_password }}' +mongodbDatabase: {{ database_db_name }} + +## Whether enable/disable IPv6 on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6 +## +mongodbEnableIPv6: false + +## Whether enable/disable DirectoryPerDB on MongoDB +## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb +## +mongodbDirectoryPerDB: false + +## MongoDB System Log configuration +## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level +## +mongodbSystemLogVerbosity: 0 +mongodbDisableSystemLog: false + +## MongoDB additional command line flags +## +## Can be used to specify command line flags, for example: +## +## mongodbExtraFlags: +## - "--wiredTigerCacheSizeGB=2" +mongodbExtraFlags: [] + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Kubernetes Cluster Domain +clusterDomain: cluster.local + +## Kubernetes service type +service: + ## Specify an explicit 
service name. + # name: svc-mongo + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + # clusterIP: None + port: {{ private_port }} + qovery_name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + ## Specify the externalIP value ClusterIP service type. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + # externalIPs: [] + ## Specify the loadBalancerIP value for LoadBalancer service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + # loadBalancerIP: + ## Specify the loadBalancerSourceRanges value for LoadBalancer service types. 
+ ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: [] + +## Use StatefulSet instead of Deployment when deploying standalone +useStatefulSet: true + +## Setting up replication +## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication +# +replicaSet: + ## Whether to create a MongoDB replica set for high availability or not + enabled: false + useHostnames: true + + ## Name of the replica set + ## + name: rs0 + + ## Key used for replica set authentication + ## + # key: key + + ## Number of replicas per each node type + ## + replicas: + secondary: 1 + arbiter: 1 + + ## Pod Disruption Budget + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + pdb: + enabled: true + minAvailable: + secondary: 1 + arbiter: 1 + # maxUnavailable: + # secondary: 1 + # arbiter: 1 + +# Annotations to be added to the deployment or statefulsets +annotations: {} + +# Additional abels to apply to the deployment or statefulsets +labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +# Annotations to be added to MongoDB pods +podAnnotations: {} + +# Additional pod labels to apply +podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +# Define separate resources per arbiter, which are less then primary or secondary +# used only when replica set is enabled +resourcesArbiter: {} +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 100m +# memory: 256Mi + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## Node selector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelector: {} + +## Define Separate nodeSelector for secondaries +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelectorSecondary: {} + +## Define Separate nodeSelector for arbiter +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +nodeSelectorArbiter: {} + +## Affinity +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} +# Define separate affinity for arbiter pod +affinityArbiter: {} + +## Tolerations +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Add sidecars to the pod +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecars: [] +## Array to add extra volumes +## +extraVolumes: [] +## Array to add extra mounts (normally used with extraVolumes) +## +extraVolumeMounts: [] + +## Add sidecars to the arbiter pod +# used only when replica set is enabled +## +## For example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +sidecarsArbiter: [] +## Array 
to add extra volumes to the arbiter +# used only when replica set is enabled +## +extraVolumesArbiter: [] +## Array to add extra mounts (normally used with extraVolumes) to the arbiter +# used only when replica set is enabled +## +extraVolumeMountsArbiter: [] + +## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## MongoDB images. + ## + mountPath: /bitnami/mongodb + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + ## mongodb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # storageClassSecondary: "-" + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## Configure the ingress resource that allows you to access the +## MongoDB installation. 
Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: mongodb.local + path: / + + ## The tls configuration for the ingress + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + tls: + - hosts: + - mongodb.local + secretName: mongodb.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: airflow.local-tls + # key: + # certificate: + +## Configure the options for init containers to be run before the main app containers +## are started. All init containers are run sequentially and must exit without errors +## for the next one to be started. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. 
+# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.11.0-debian-10-r45 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotation + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## 
https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml b/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml new file mode 100644 index 00000000..daa4475d --- /dev/null +++ b/lib/aws-ec2/chart_values/mysql/q-values.j2.yaml @@ -0,0 +1,603 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MySQL image +## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## +image: + debug: false + registry: quay.io + repository: bitnami/mysql + tag: "{{ version }}" + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override mysql.fullname template (will maintain the release name) +## +# nameOverride: +nameOverride: {{ sanitized_name }} + +## String to fully override mysql.fullname template +## +# fullnameOverride: +fullnameOverride: {{ sanitized_name }} + +## Cluster domain +## +clusterDomain: cluster.local + +commonLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 100m + # memory: 128Mi + requests: + cpu: "{{ database_total_cpus }}" + memory: "{{ database_ram_size_in_mib }}Mi" + +## Use existing secret (ignores root, db and replication passwords) +## +# existingSecret: + +## Admin (root) credentials +## +root: + ## MySQL admin password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + password: '{{ database_password }}' + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount admin password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + +## Custom user/db credentials +## +db: + ## MySQL username and password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run + ## Note that this user should be different from the MySQL replication user (replication.user) + ## + user: '{{ database_login }}' + password: '{{ database_password }}' + ## Database to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run + ## + name: {{ sanitized_name }} + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: true + +## Replication configuration +## +replication: + ## Enable replication. This enables the creation of replicas of MySQL. 
If false, only a + ## master deployment would be created + ## + enabled: false + ## + ## MySQL replication user + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## Note that this user should be different from the MySQL user (db.user) + ## + user: replicator + ## MySQL replication user password + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + password: + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not force, a random password will be generated. + ## + forcePassword: true + ## Mount replication user password as a file instead of using an environment variable + ## + injectSecretsAsVolume: false + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+# +## ConfigMap with scripts to be run at first boot +## Note: This will override initdbScripts +# initdbScriptsConfigMap: + +serviceAccount: + create: true + ## Specify the name of the service account created/used + # name: + +## Master nodes parameters +## +master: + ## Configure MySQL with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + + ## updateStrategy for master nodes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + + ## Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. 
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL master pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + + ## MySQL master containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## Example: + ## containerSecurityContext: + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: {} + + ## MySQL master containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + + ## MySQL master containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Enable persistence using PVCs on master nodes + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath: /bitnami/mysql + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + storageClass: "aws-ebs-gp2-0" + ## PVC annotations + ## + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: {{ database_disk_size_in_gib }}Gi + ## Use an existing PVC + ## + # existingClaim: + +## Slave nodes parameters +## +slave: + ## Number of slave replicas + ## + replicas: 2 + + ## Configure MySQL slave with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + config: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + + ## updateStrategy for slave nodes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + + ## Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + + ## Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Node labels for pod assignment. Evaluated as a template. 
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## An array to add extra environment variables + ## For example: + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: + + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## MySQL slave pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + + ## MySQL slave containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## Example: + ## containerSecurityContext: + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: {} + + ## MySQL slave containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + + ## MySQL slave containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + enabled: true + ## Initializing the database could take some time + ## + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + ## Enable persistence using PVCs on slave nodes + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + mountPath: /bitnami/mysql + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## PVC annotations + ## + annotations: {} + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + ## Persistent Volume size + ## + size: {{ database_disk_size_in_gib }}Gi + ## Use an existing PVC + ## + # existingClaim: + +## MySQL Service properties +## +service: + ## MySQL Service type + ## + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + name: {{ service_name }} + + ## MySQL Service port + ## + port: 3306 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: + master: + slave: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP for the MySQL Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + # master: + # slave: + +## MySQL prometheus metrics parameters +## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint +## +metrics: + enabled: false + ## Bitnami MySQL Prometheus exporter image + ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.12.1-debian-10-r127 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## MySQL Prometheus exporter containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 0.5 + # memory: 256Mi + requests: {} + # cpu: 0.5 + # memory: 256Mi + + ## MySQL Prometheus exporter service parameters + ## + service: + type: ClusterIP + port: 9104 + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus diff --git a/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml b/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml new file mode 100644 index 00000000..da7b7193 --- /dev/null +++ b/lib/aws-ec2/chart_values/nginx-ingress/nginx-ingress.j2.yaml @@ -0,0 +1,572 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller + tag: "0.30.0" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. 
+ # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: true + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: + client_max_body_size: 100m + proxy-body-size: 100m + server-tokens: "false" + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirst + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader-{{ id }} + + ## Name of the ingress class to route through this controller + ## + ingressClass: "{{ id }}" + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: true + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: true + namespace: "{{ namespace }}" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. 
to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: {{ nginx_minimum_replicas }} + + minAvailable: 1 + + resources: + limits: + cpu: {{ nginx_limit_cpu }} + memory: {{ nginx_limit_memory }} + requests: + cpu: {{ nginx_requests_cpu }} + memory: {{ nginx_requests_memory }} + + autoscaling: + enabled: {{ nginx_enable_horizontal_autoscaler }} + minReplicas: {{ nginx_minimum_replicas }} + maxReplicas: {{ nginx_maximum_replicas }} + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + labels: + app_id : "{{ id }}" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "Local" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
+ # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the 
all requests returned 5XX; this requires your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX; this requires your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: k8s.gcr.io/defaultbackend-amd64 + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
+ useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + 
+# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: true + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" diff --git a/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml b/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml new file mode 100644 index 00000000..3e908351 --- /dev/null +++ b/lib/aws-ec2/chart_values/postgresql/q-values.j2.yaml @@ -0,0 +1,568 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: quay.io + repository: bitnami/postgresql + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', 
else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +nameOverride: {{ sanitized_name }} + +## String to fully override postgresql.fullname template +## +fullnameOverride: {{ sanitized_name }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. 
+ # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: true + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+postgresqlPostgresPassword: '{{ database_password }}' + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: '{{ database_login }}' + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlPassword: '{{ database_password }}' + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +postgresqlDatabase: {{ database_db_name }} + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +initdbUser: postgres +initdbPassword: '{{ database_password }}' + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + # clusterIP: None + port: 5432 + name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on an initContainer. + ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ ## + subPath: "" + + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + annotations: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + annotations: {} + podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + 
## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: "{{ database_ram_size_in_mib }}Mi" + cpu: "{{ database_total_cpus }}" + +## Add annotations to all the deployed resources +## +commonAnnotiations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
+ ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r116 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/lib/aws-ec2/chart_values/redis/q-values.j2.yaml b/lib/aws-ec2/chart_values/redis/q-values.j2.yaml new file mode 100644 index 00000000..007bb33a --- /dev/null +++ b/lib/aws-ec2/chart_values/redis/q-values.j2.yaml @@ -0,0 +1,788 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: 
+ # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: quay.io + repository: bitnami/redis + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: "{{ version }}" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +nameOverride: {{ sanitized_name }} + +## String to fully override redis.fullname template +## +fullnameOverride: {{ sanitized_name }} + +## Cluster settings +cluster: + enabled: false + slaveCount: 3 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: {{ version }} + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 
5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis Sentinel Service properties + service: + ## Redis Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, Redis will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: true + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +usePassword: true +## Redis password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: '{{ database_password }}' +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. 
+ authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis Master parameters +## +master: + ## Redis command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis commands to disable + ## + ## Can be used to disable Redis commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + databaseName: {{ sanitized_name }} + podAnnotations: {} + + ## Redis Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. 
+ # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis Master pod/node affinity/anti-affinity + ## + affinity: 
{} + + ## Redis Master Service properties + service: + ## Redis Master Service type + type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %} + port: 6379 + name: {{ service_name }} + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + {% if publicly_accessible -%} + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}" + external-dns.alpha.kubernetes.io/ttl: "300" + {% endif %} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "aws-ebs-gp2-0" + accessModes: + - ReadWriteOnce + size: {{ database_disk_size_in_gib }}Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + databaseId: {{ id }} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis Master pod priorityClassName + ## + priorityClassName: '' + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master +## +slave: + ## Slave Service properties + service: + ## Redis Slave Service type + type: ClusterIP + ## Redis port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis + ## + preExecCmds: "" + ## Additional Redis configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis extra flags + extraFlags: [] + ## List of Redis commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis slave pod priorityClassName + # priorityClassName: '' + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis images. 
+ path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + labels: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.13.1-debian-10-r6 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: '' + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true 
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+  ## Init container Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.securityContext.runAsUser
+  ## When runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+  ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
+  ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false
+  ##
+  securityContext:
+    runAsUser: 0
+
+## Redis config file
+## ref: https://redis.io/topics/config
+##
+configmap: |-
+  # Enable AOF https://redis.io/topics/persistence#append-only-file
+  appendonly yes
+  # Disable RDB persistence, AOF persistence already enabled.
+  save ""
+
+## Sysctl InitContainer
+## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings)
+sysctlImage:
+  enabled: false
+  command: []
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore b/lib/aws-ec2/charts/q-application/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/.helmignore rename to lib/aws-ec2/charts/q-application/.helmignore diff --git a/lib/aws-ec2/charts/q-application/Chart.j2.yaml b/lib/aws-ec2/charts/q-application/Chart.j2.yaml new file mode 100644 index 00000000..c8a03105 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/Chart.j2.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: qovery +description: A Qovery Helm chart for Kubernetes deployments +type: application +version: 0.2.0 +appVersion: {{ helm_app_version }} +icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png diff --git a/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml b/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml new file mode 100644 index 00000000..84053b29 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/deployment.j2.yaml @@ -0,0 +1,92 @@ +{%- if not is_storage %} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + 
annotations: + releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %} +spec: + replicas: {{ min_instances }} + strategy: + type: RollingUpdate + {% if max_instances == 1 %} + rollingUpdate: + maxSurge: 1 + {% endif %} + selector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + template: + metadata: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - {{ sanitized_name }} + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + terminationGracePeriodSeconds: 60 + securityContext: {} + {%- if is_registry_secret %} + imagePullSecrets: + - name: {{ registry_secret }} + {%- endif %} + containers: + - name: {{ sanitized_name }} + image: "{{ image_name_with_tag }}" + env: + {%- for ev in environment_variables %} + - name: "{{ ev.key }}" + valueFrom: + secretKeyRef: + name: {{ sanitized_name }} + key: {{ ev.key }} + {%- endfor %} + {%- if private_port %} + ports: + {%- for port in ports %} + - containerPort: {{ port.port }} + name: "p{{ port.port }}" + protocol: TCP + {%- endfor %} + readinessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 20 + {%- endif %} + resources: + limits: + cpu: {{ cpu_burst }} + memory: {{ total_ram_in_mib }}Mi + requests: + cpu: {{ total_cpus }} + memory: {{ total_ram_in_mib }}Mi +{%- endif %} diff --git 
a/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml b/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml new file mode 100644 index 00000000..d14331e7 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/horizontal_autoscaler.j2.yaml @@ -0,0 +1,19 @@ +{%- if not is_storage and min_instances != max_instances %} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + envId: {{ environment_id }} + appId: {{ id }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ sanitized_name }} + minReplicas: {{ min_instances }} + maxReplicas: {{ max_instances }} + targetCPUUtilizationPercentage: 60 +{%- endif %} + diff --git a/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml b/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml new file mode 100644 index 00000000..14ab70f4 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/networkpolicies.j2.yaml @@ -0,0 +1,95 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-default + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + # Deny all ingress by default to this application + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-app-access + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + # Then allow some ingress to this application + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + ingress: + # Allow 
ingress from same environment + - from: + - podSelector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + + # Allow ingress from everywhere but only to application port + {% if is_private_port %} + - ports: + - port: {{ private_port }} + {% endif %} + # FIXME(sileht): Previous rule is not perfect as other pods/namespaces can + # access to the application port without going through the Ingress object, + # but that's not critical neither + # Only way to fix that is to allow lb and kube-proxy to access the namespace/pods explictly via IP, eg: + # - from: + # - ipBlock: + # cidr: 10.0.99.179/32 + # - ipBlock: + # cidr: 10.0.28.216/32 + # - ipBlock: + # cidr: 10.0.98.42/32 + # - ipBlock: + # cidr: 10.0.59.208/32 + # Since user pods, kube-proxy, and lbs are all in 10.0.0.0/8 we can't write generic rule like: + # - ipBlock: + # cidr: 0.0.0.0/0 + # except: [10.0.0.0/8] + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ sanitized_name }}-deny-aws-metadata-server + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + podSelector: + matchLabels: + appId: {{ id }} + app: {{ sanitized_name }} + ownerId: {{ owner_id }} + envId: {{ environment_id }} + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 169.254.169.254/32 diff --git a/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml b/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml new file mode 100644 index 00000000..4e8b8015 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/pdb.j2.yaml @@ -0,0 +1,21 @@ +{%- if not is_storage %} +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +spec: + maxUnavailable: 10% + selector: + matchLabels: + ownerId: {{ owner_id }} 
+ envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +{%- endif %} \ No newline at end of file diff --git a/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml b/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml new file mode 100644 index 00000000..86625f6e --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/secret.j2.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} +type: Opaque +data: + {%- for ev in environment_variables %} + {{ ev.key }}: |- + {{ ev.value }} + {%- endfor %} diff --git a/lib/aws-ec2/charts/q-application/templates/service.j2.yaml b/lib/aws-ec2/charts/q-application/templates/service.j2.yaml new file mode 100644 index 00000000..bb258f90 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/service.j2.yaml @@ -0,0 +1,26 @@ +{%- if (ports is defined) and ports %} +apiVersion: v1 +kind: Service +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +spec: + type: ClusterIP + ports: + {%- for port in ports %} + - protocol: TCP + name: "p{{ port.port }}" + port: {{ port.port }} + targetPort: {{ port.port }} + {%- endfor %} + selector: + ownerId: {{ owner_id }} + appId: {{ id }} + app: {{ sanitized_name }} + envId: {{ environment_id }} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml b/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml new file mode 100644 index 00000000..fb7cf72b --- /dev/null +++ b/lib/aws-ec2/charts/q-application/templates/statefulset.j2.yaml @@ -0,0 +1,132 @@ +{%- if is_storage %} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ 
owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %} +spec: + replicas: {{ min_instances }} + serviceName: {{ sanitized_name }} + selector: + matchLabels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + template: + metadata: + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + annotations: + checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - {{ sanitized_name }} + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + terminationGracePeriodSeconds: 60 + securityContext: {} + {%- if is_registry_secret %} + imagePullSecrets: + - name: {{ registry_secret }} + {%- endif %} + containers: + - name: {{ sanitized_name }} + image: "{{ image_name_with_tag }}" + env: + {%- for ev in environment_variables %} + - name: "{{ ev.key }}" + valueFrom: + secretKeyRef: + name: {{ sanitized_name }} + key: {{ ev.key }} + {%- endfor %} + {%- if private_port %} + ports: + {%- for port in ports %} + - containerPort: {{ port.port }} + name: "p{{ port.port }}" + protocol: TCP + {%- endfor %} + readinessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: {{ private_port }} + initialDelaySeconds: {{ start_timeout_in_seconds }} + periodSeconds: 20 + {%- endif %} + resources: + limits: + cpu: {{ cpu_burst }} + memory: {{ total_ram_in_mib }}Mi + requests: + cpu: {{ total_cpus }} + memory: {{ total_ram_in_mib }}Mi + volumeMounts: +{%- for s in storage %} + - name: {{ s.id }} + mountPath: 
{{ s.mount_point }} +{%- endfor %} + volumeClaimTemplates: +{%- for s in storage %} +{% if clone %} + - metadata: + name: {{ s.id }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + app: {{ sanitized_name }} + diskId: {{ s.id }} + diskType: {{ s.storage_type }} + spec: + accessModes: + - ReadWriteOnce + storageClassName: aws-ebs-{{ s.storage_type }}-0 + dataSource: + name: {{ s.id }} + kind: PersistentVolumeClaim + resources: + requests: + storage: {{ s.size_in_gib }}Gi +{% else %} + - metadata: + name: {{ s.id }} + labels: + ownerId: {{ owner_id }} + envId: {{ environment_id }} + appId: {{ id }} + diskId: {{ s.id }} + diskType: {{ s.storage_type }} + spec: + accessModes: + - ReadWriteOnce + storageClassName: aws-ebs-{{ s.storage_type }}-0 + resources: + requests: + storage: {{ s.size_in_gib }}Gi +{%- endif %} +{%- endfor %} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-application/values.j2.yaml b/lib/aws-ec2/charts/q-application/values.j2.yaml new file mode 100644 index 00000000..667115e7 --- /dev/null +++ b/lib/aws-ec2/charts/q-application/values.j2.yaml @@ -0,0 +1,2 @@ +# Don't add anything here +# Jinja2 is taken on behalf of Go template diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore b/lib/aws-ec2/charts/q-ingress-tls/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/.helmignore rename to lib/aws-ec2/charts/q-ingress-tls/.helmignore diff --git a/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml new file mode 100644 index 00000000..060f9a9e --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/Chart.j2.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: qovery +description: A Qovery Helm chart for Kubernetes deployments +type: application +version: 0.2.0 +icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png diff --git 
a/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml new file mode 100644 index 00000000..cfb54db6 --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/templates/cert-issuer.j2.yaml @@ -0,0 +1,20 @@ +{%- if custom_domains|length > 0 %} +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: {{ id }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} +spec: + acme: + server: {{ spec_acme_server }} + email: {{ spec_acme_email }} + privateKeySecretRef: + name: acme-{{ id }}-key + solvers: + - http01: + ingress: + class: nginx-qovery +{%- endif %} diff --git a/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml new file mode 100644 index 00000000..5cf11a24 --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml @@ -0,0 +1,69 @@ +{%- if routes|length >= 1 %} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ sanitized_name }} + namespace: {{ namespace }} + labels: + ownerId: {{ owner_id }} + routerName: {{ sanitized_name }} + routerId: {{ id }} + envId: {{ environment_id }} + fqdn: "{{ router_default_domain }}" + annotations: + external-dns.alpha.kubernetes.io/hostname: {{ router_default_domain }} + external-dns.alpha.kubernetes.io/ttl: "300" + kubernetes.io/tls-acme: "true" + {%- if custom_domains|length > 0 %} + cert-manager.io/issuer: {{ id }} + {%- else %} + cert-manager.io/cluster-issuer: {{ metadata_annotations_cert_manager_cluster_issuer }} + {%- endif %} + kubernetes.io/ingress.class: "nginx-qovery" + ingress.kubernetes.io/ssl-redirect: "true" + #nginx.ingress.kubernetes.io/enable-cors: "true" + #nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,x-csrftoken" + {%- if sticky_sessions_enabled == true 
%} + # https://kubernetes.github.io/ingress-nginx/examples/affinity/cookie/ + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-secure: "true" + nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE_QOVERY" + nginx.ingress.kubernetes.io/session-cookie-max-age: "85400" # 1 day + nginx.ingress.kubernetes.io/session-cookie-expires: "85400" # 1 day + nginx.ingress.kubernetes.io/session-cookie-samesite: "Lax" + {%- endif %} +spec: + tls: + {%- if custom_domains|length > 0 %} + - secretName: "router-tls-{{ id }}" + hosts: + {%- for domain in custom_domains %} + - "{{ domain.domain }}" + {%- endfor %} + {%- endif %} + # We don't use secret name as we want to rely on default tls certificate from ingress controller + # which has our wildcard certificate https://cert-manager.io/next-docs/faq/kubed/ + rules: + - host: "{{ router_default_domain }}" + http: + paths: + {%- for route in routes %} + - path: "{{ route.path }}" + backend: + serviceName: "{{ route.application_name }}" + servicePort: {{ route.application_port }} + {%- endfor %} + {%- for domain in custom_domains %} + - host: "{{ domain.domain }}" + http: + paths: + {%- for route in routes %} + - path: "{{ route.path }}" + backend: + serviceName: "{{ route.application_name }}" + servicePort: {{ route.application_port }} + {%- endfor %} + {%- endfor %} +{%- endif %} diff --git a/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml b/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml new file mode 100644 index 00000000..2afc8c8a --- /dev/null +++ b/lib/aws-ec2/charts/q-ingress-tls/values.j2.yaml @@ -0,0 +1,2 @@ +# Don't add anything here +# Jinja2 is taken on behalf of Go template diff --git a/lib/aws-ec2/services/common/backend.j2.tf b/lib/aws-ec2/services/common/backend.j2.tf new file mode 100644 index 00000000..f0746dc0 --- /dev/null +++ b/lib/aws-ec2/services/common/backend.j2.tf @@ 
-0,0 +1,21 @@ +terraform { + backend "kubernetes" { + secret_suffix = "{{ tfstate_suffix_name }}" + load_config_file = true + config_path = "{{ kubeconfig_path }}" + namespace = "{{ namespace }}" + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws-iam-authenticator" + args = [ + "token", + "-i", + "qovery-{{kubernetes_cluster_id}}"] + env = { + AWS_ACCESS_KEY_ID = "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}" + AWS_DEFAULT_REGION = "{{ region }}" + } + } + } +} diff --git a/lib/aws-ec2/services/common/common-variables.j2.tf b/lib/aws-ec2/services/common/common-variables.j2.tf new file mode 100644 index 00000000..0d52b167 --- /dev/null +++ b/lib/aws-ec2/services/common/common-variables.j2.tf @@ -0,0 +1,167 @@ +# Qovery +variable "cluster_name" { + description = "Kubernetes cluster name" + default = "{{ cluster_name }}" + type = string +} + +variable "region" { + description = "AWS region to store terraform state and lock" + default = "{{ region }}" + type = string +} + +variable "kubernetes_cluster_id" { + description = "Kubernetes cluster name with region" + default = "{{ kubernetes_cluster_id }}" + type = string +} + +variable "region_cluster_name" { + description = "AWS region to store terraform state and lock" + default = "{{ region }}-{{ cluster_name }}" + type = string +} + +variable "q_project_id" { + description = "Qovery project ID" + default = "{{ project_id }}" + type = string +} + +variable "q_customer_id" { + description = "Qovery customer ID" + default = "{{ owner_id }}" + type = string +} + +variable "q_environment_id" { + description = "Qovery client environment" + default = "{{ environment_id }}" + type = string +} + +variable "database_tags" { + description = "Qovery database tags" + default = { + "cluster_name" = "{{ cluster_name }}" + "cluster_id" = "{{ kubernetes_cluster_id }}" + "region" = "{{ region }}" + "q_client_id" = "{{ owner_id }}" + "q_environment_id" = "{{ environment_id }}" + 
"q_project_id" = "{{ project_id }}" + {% if resource_expiration_in_seconds is defined %} + "ttl" = "{{ resource_expiration_in_seconds }}" + {% endif %} + {% if snapshot is defined and snapshot["snapshot_id"] %} meta_last_restored_from = "{{ snapshot['snapshot_id'] }}" + {% endif %} + } + type = map +} + +{%- if resource_expiration_in_seconds is defined %} +# Pleco ttl +variable "resource_expiration_in_seconds" { + description = "Resource expiration in seconds" + default = {{resource_expiration_in_seconds}} + type = number +} +{% endif %} + +{%- if snapshot is defined %} +# Snapshots +variable "snapshot_identifier" { + description = "Snapshot ID to restore" + default = "{{ snapshot['snapshot_id']}}" + type = string +} +{% endif %} + +# Network + +variable "publicly_accessible" { + description = "Instance publicly accessible" + default = {{ publicly_accessible }} + type = bool +} + +variable "multi_az" { + description = "Multi availability zones" + default = true + type = bool +} + +# Upgrades + +variable "auto_minor_version_upgrade" { + description = "Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window" + default = true + type = bool +} + +variable "apply_changes_now" { + description = "Apply changes now or during the maintenance window" + default = false + type = bool +} + +variable "preferred_maintenance_window" { + description = "Maintenance window" + default = "Tue:02:00-Tue:04:00" + type = string +} + +# Monitoring + +variable "performance_insights_enabled" { + description = "Specifies whether Performance Insights are enabled" + default = true + type = bool +} + +variable "performance_insights_enabled_retention" { + description = "The amount of time in days to retain Performance Insights data" + default = 7 + type = number +} + +# Backups + +variable "backup_retention_period" { + description = "Backup retention period" + default = 14 + type = number +} + +variable 
"preferred_backup_window" { + description = "Maintenance window" + default = "00:00-01:00" + type = string +} + +variable "delete_automated_backups" { + description = "Delete automated backups" + default = {{delete_automated_backups}} + type = bool +} + +variable "skip_final_snapshot" { + description = "Skip final snapshot" + default = {{ skip_final_snapshot }} + type = bool +} + +variable "final_snapshot_name" { + description = "Name of the final snapshot before the database goes deleted" + default = "{{ final_snapshot_name }}" + type = string +} + +{%- if snapshot is defined %} +# Snapshots +variable "snapshot_identifier" { + description = "Snapshot ID to restore" + default = "{{ snapshot['snapshot_id']}}" + type = string +} +{% endif %} \ No newline at end of file diff --git a/lib/aws-ec2/services/common/providers.j2.tf b/lib/aws-ec2/services/common/providers.j2.tf new file mode 100644 index 00000000..95abc7b1 --- /dev/null +++ b/lib/aws-ec2/services/common/providers.j2.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 3.36.0" + } + helm = { + source = "hashicorp/helm" + version = "~> 1.3.2" + } + local = { + source = "hashicorp/local" + version = "~> 1.4" + } + time = { + source = "hashicorp/time" + version = "~> 0.3" + } + } + required_version = ">= 0.14" +} + +provider "aws" { + profile = "default" + region = "{{ region }}" + access_key = "{{ aws_access_key }}" + secret_key = "{{ aws_secret_key }}" +} + +data aws_eks_cluster eks_cluster { + name = "qovery-{{kubernetes_cluster_id}}" +} + +provider "helm" { + kubernetes { + host = data.aws_eks_cluster.eks_cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data) + load_config_file = false + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws-iam-authenticator" + args = ["token", "-i", "qovery-{{kubernetes_cluster_id}}"] + env = { + AWS_ACCESS_KEY_ID = "{{ 
aws_access_key }}" + AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}" + AWS_DEFAULT_REGION = "{{ region }}" + } + } + } +} + +resource "time_static" "on_db_create" {} diff --git a/lib/aws-ec2/services/mongodb/local-vars.j2.tf b/lib/aws-ec2/services/mongodb/local-vars.j2.tf new file mode 100644 index 00000000..4ea911fc --- /dev/null +++ b/lib/aws-ec2/services/mongodb/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + mongodb_database_tags = merge (var.database_tags, { + database_identifier = var.documentdb_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mongodb/main.j2.tf b/lib/aws-ec2/services/mongodb/main.j2.tf new file mode 100644 index 00000000..8f20c381 --- /dev/null +++ b/lib/aws-ec2/services/mongodb/main.j2.tf @@ -0,0 +1,114 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["DocumentDB"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +resource "helm_release" "documentdb_instance_external_name" { + name = "${aws_docdb_cluster.documentdb_cluster.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_docdb_cluster.documentdb_cluster.endpoint + } + + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + + set { + name = "app_id" + value = "{{database_id}}" + } + + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_docdb_cluster.documentdb_cluster + ] +} + +resource "aws_docdb_cluster_instance" 
"documentdb_cluster_instances" { + count = var.documentdb_instances_number + + cluster_identifier = aws_docdb_cluster.documentdb_cluster.id + identifier = "${var.documentdb_identifier}-${count.index}" + + instance_class = var.instance_class + + # Maintenance and upgrade + auto_minor_version_upgrade = var.auto_minor_version_upgrade + preferred_maintenance_window = var.preferred_maintenance_window + + tags = local.mongodb_database_tags +} + +resource "aws_docdb_cluster" "documentdb_cluster" { + cluster_identifier = var.documentdb_identifier + + tags = local.mongodb_database_tags + + # DocumentDB instance basics + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + master_password = var.password + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + master_username = var.username + engine = "docdb" + {%- endif %} + storage_encrypted = var.encrypt_disk + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + + # Backups + backup_retention_period = var.backup_retention_period + preferred_backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} +} diff --git a/lib/aws-ec2/services/mongodb/variables.j2.tf b/lib/aws-ec2/services/mongodb/variables.j2.tf new file mode 100644 index 00000000..c2d5a36f --- /dev/null +++ b/lib/aws-ec2/services/mongodb/variables.j2.tf @@ -0,0 +1,43 @@ +# documentdb instance basics + +variable "documentdb_identifier" { + description = "Documentdb cluster name (Cluster identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "documentdb_instances_number" { + description = "DocumentDB instance numbers" + default = 1 + type 
= number +} + +variable "port" { + description = "Documentdb instance port" + default = {{ database_port }} + type = number +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{database_instance_type}}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mysql/local-vars.j2.tf b/lib/aws-ec2/services/mysql/local-vars.j2.tf new file mode 100644 index 00000000..0f6731e1 --- /dev/null +++ b/lib/aws-ec2/services/mysql/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + mysql_database_tags = merge (var.database_tags, { + database_identifier = var.mysql_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/mysql/main.j2.tf b/lib/aws-ec2/services/mysql/main.j2.tf new file mode 100644 index 00000000..68e84b13 --- /dev/null +++ b/lib/aws-ec2/services/mysql/main.j2.tf @@ -0,0 +1,132 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["RDS"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +data "aws_iam_role" "rds_enhanced_monitoring" { + 
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}" +} + +resource "helm_release" "mysql_instance_external_name" { + name = "${aws_db_instance.mysql_instance.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_db_instance.mysql_instance.address + } + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + set { + name = "app_id" + value = "{{database_id}}" + } + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_db_instance.mysql_instance + ] +} + +resource "aws_db_parameter_group" "mysql_parameter_group" { + name = "qovery-${var.mysql_identifier}" + family = var.parameter_group_family + + tags = local.mysql_database_tags + + # Set superuser permission to the default 'username' account + parameter { + name = "log_bin_trust_function_creators" + value = "1" + } +} + +# Non snapshoted version +resource "aws_db_instance" "mysql_instance" { + identifier = var.mysql_identifier + + tags = local.mysql_database_tags + + # MySQL instance basics + instance_class = var.instance_class + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + password = var.password + name = var.database_name + parameter_group_name = aws_db_parameter_group.mysql_parameter_group.name + storage_encrypted = var.encrypt_disk + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + allocated_storage = var.disk_size + storage_type = var.storage_type + username = var.username + engine_version = var.mysql_version + engine = "mysql" + ca_cert_identifier = "rds-ca-2019" + {%- endif %} + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + publicly_accessible = var.publicly_accessible + multi_az = var.multi_az + + # Maintenance and 
upgrades + apply_immediately = var.apply_changes_now + auto_minor_version_upgrade = var.auto_minor_version_upgrade + maintenance_window = var.preferred_maintenance_window + + # Monitoring + monitoring_interval = 10 + monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn + + # Backups + backup_retention_period = var.backup_retention_period + backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} + copy_tags_to_snapshot = true + delete_automated_backups = var.delete_automated_backups + +} diff --git a/lib/aws-ec2/services/mysql/variables.j2.tf b/lib/aws-ec2/services/mysql/variables.j2.tf new file mode 100644 index 00000000..84d6e420 --- /dev/null +++ b/lib/aws-ec2/services/mysql/variables.j2.tf @@ -0,0 +1,67 @@ +# MySQL instance basics + +variable "mysql_identifier" { + description = "MySQL instance name (DB identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "port" { + description = "MySQL instance port" + default = {{ database_port }} + type = number +} + +variable "disk_size" { + description = "disk instance size" + default = {{ database_disk_size_in_gib }} + type = number +} + +variable "mysql_version" { + description = "MySQL version" + default = "{{ version }}" + type = string +} + +variable "parameter_group_family" { + description = "RDS parameter group family" + default = "{{ parameter_group_family }}" + type = string +} + +variable "storage_type" { + description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)." 
+ default = "{{ database_disk_type }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{database_instance_type}}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "database_name" { + description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance" + default = "{{ database_name }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/postgresql/local-vars.j2.tf b/lib/aws-ec2/services/postgresql/local-vars.j2.tf new file mode 100644 index 00000000..bf86e787 --- /dev/null +++ b/lib/aws-ec2/services/postgresql/local-vars.j2.tf @@ -0,0 +1,6 @@ +locals { + postgres_database_tags = merge (var.database_tags, { + database_identifier = var.postgresql_identifier + creationDate = time_static.on_db_create.rfc3339 + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/postgresql/main.j2.tf b/lib/aws-ec2/services/postgresql/main.j2.tf new file mode 100644 index 00000000..9c0517db --- /dev/null +++ b/lib/aws-ec2/services/postgresql/main.j2.tf @@ -0,0 +1,121 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "k8s_subnet_ids" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" + values = ["RDS"] + } +} + +data "aws_security_group" "selected" { + filter { + 
name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +data "aws_iam_role" "rds_enhanced_monitoring" { + name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}" +} + +resource "helm_release" "postgres_instance_external_name" { + name = "${aws_db_instance.postgresql_instance.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_db_instance.postgresql_instance.address + } + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + set { + name = "app_id" + value = "{{database_id}}" + } + set { + name = "service_name" + value = "{{service_name}}" + } + + depends_on = [ + aws_db_instance.postgresql_instance + ] +} + + +# Non snapshoted version +resource "aws_db_instance" "postgresql_instance" { + identifier = var.postgresql_identifier + + tags = local.postgres_database_tags + + # Postgres instance basics + instance_class = var.instance_class + port = var.port + timeouts { + create = "60m" + update = "120m" + delete = "60m" + } + password = var.password + storage_encrypted = var.encrypt_disk + {%- if snapshot and snapshot["snapshot_id"] %} + # Snapshot + snapshot_identifier = var.snapshot_identifier + {%- else %} + allocated_storage = var.disk_size + name = var.database_name + storage_type = var.storage_type + username = var.username + engine_version = var.postgresql_version + engine = "postgres" + ca_cert_identifier = "rds-ca-2019" + {%- endif %} + + # Network + db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id + vpc_security_group_ids = data.aws_security_group.selected.*.id + publicly_accessible = var.publicly_accessible + multi_az = var.multi_az + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + auto_minor_version_upgrade = var.auto_minor_version_upgrade + maintenance_window = 
var.preferred_maintenance_window + + # Monitoring + performance_insights_enabled = var.performance_insights_enabled + performance_insights_retention_period = var.performance_insights_enabled_retention + monitoring_interval = 10 + monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn + + # Backups + backup_retention_period = var.backup_retention_period + backup_window = var.preferred_backup_window + skip_final_snapshot = var.skip_final_snapshot + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} + copy_tags_to_snapshot = true + delete_automated_backups = var.delete_automated_backups + +} diff --git a/lib/aws-ec2/services/postgresql/variables.j2.tf b/lib/aws-ec2/services/postgresql/variables.j2.tf new file mode 100644 index 00000000..9feac6cd --- /dev/null +++ b/lib/aws-ec2/services/postgresql/variables.j2.tf @@ -0,0 +1,61 @@ +# PostgreSQL instance basics + +variable "postgresql_identifier" { + description = "PostgreSQL instance name (DB identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "port" { + description = "PostgreSQL instance port" + default = "{{ database_port }}" + type = number +} + +variable "disk_size" { + description = "disk instance size" + default = "{{ database_disk_size_in_gib }}" + type = number +} + +variable "postgresql_version" { + description = "Postgresql version" + default = "{{ version }}" + type = string +} + +variable "storage_type" { + description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)." 
+ default = "{{ database_disk_type }}" + type = string +} + +variable "encrypt_disk" { + description = "Enable disk encryption" + default = "{{ encrypt_disk }}" + type = string +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html" + default = "{{ database_instance_type }}" + type = string +} + +variable "username" { + description = "Admin username for the master DB user" + default = "{{ database_login }}" + type = string +} + +variable "password" { + description = "Admin password for the master DB user" + default = "{{ database_password }}" + type = string +} + +variable "database_name" { + description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance" + default = "{{ database_name }}" + type = string +} \ No newline at end of file diff --git a/lib/aws-ec2/services/redis/local-vars.j2.tf b/lib/aws-ec2/services/redis/local-vars.j2.tf new file mode 100644 index 00000000..96f2d53d --- /dev/null +++ b/lib/aws-ec2/services/redis/local-vars.j2.tf @@ -0,0 +1,7 @@ +locals { + redis_database_tags = merge (var.database_tags, { + database_identifier = var.elasticache_identifier + creationDate = time_static.on_db_create.rfc3339 + {% if snapshot is defined and snapshot["snapshot_id"] %}meta_last_restored_from = var.snapshot_identifier{% endif %} + }) +} \ No newline at end of file diff --git a/lib/aws-ec2/services/redis/main.j2.tf b/lib/aws-ec2/services/redis/main.j2.tf new file mode 100644 index 00000000..98b2da8d --- /dev/null +++ b/lib/aws-ec2/services/redis/main.j2.tf @@ -0,0 +1,114 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } +} + +data "aws_subnet_ids" "selected" { + vpc_id = data.aws_vpc.selected.id + filter { + name = "tag:ClusterId" + values = [var.kubernetes_cluster_id] + } + filter { + name = "tag:Service" 
+ values = ["Elasticache"] + } +} + +data "aws_security_group" "selected" { + filter { + name = "tag:Name" + values = ["qovery-eks-workers"] + } + filter { + name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}" + values = ["owned"] + } +} + +resource "helm_release" "elasticache_instance_external_name" { + name = "${aws_elasticache_cluster.elasticache_cluster.id}-externalname" + chart = "external-name-svc" + namespace = "{{namespace}}" + atomic = true + max_history = 50 + + set { + name = "target_hostname" + value = aws_elasticache_cluster.elasticache_cluster.cache_nodes.0.address + } + + set { + name = "source_fqdn" + value = "{{database_fqdn}}" + } + + set { + name = "app_id" + value = "{{database_id}}" + } + + set { + name = "service_name" + value = "{{service_name}}" + } + + set { + name = "publicly_accessible" + value = var.publicly_accessible + } + + depends_on = [ + aws_elasticache_cluster.elasticache_cluster + ] +} + +resource "aws_elasticache_cluster" "elasticache_cluster" { + cluster_id = var.elasticache_identifier + + tags = local.redis_database_tags + + # Elasticache instance basics + port = var.port + engine_version = var.elasticache_version + # Thanks GOD AWS for not using SemVer and adding your own versioning system, + # need to add this dirty trick while Hashicorp fix this issue + # https://github.com/hashicorp/terraform-provider-aws/issues/15625 + lifecycle { + ignore_changes = [engine_version] + } + + {%- if replication_group_id is defined %} + # todo: add cluster mode and replicas support + {%- else %} + engine = "redis" + node_type = var.instance_class + num_cache_nodes = var.elasticache_instances_number + parameter_group_name = var.parameter_group_name + {%- endif %} + + {%- if snapshot is defined and snapshot["snapshot_id"] %} + # Snapshot + snapshot_name = var.snapshot_identifier + {%- endif %} + + # Network + # WARNING: this value cna't get fetch from data sources and is linked to the bootstrap phase + subnet_group_name = 
"elasticache-${data.aws_vpc.selected.id}" + + # Security + security_group_ids = data.aws_security_group.selected.*.id + + # Maintenance and upgrades + apply_immediately = var.apply_changes_now + maintenance_window = var.preferred_maintenance_window + + # Backups + snapshot_window = var.preferred_backup_window + snapshot_retention_limit = var.backup_retention_period + {%- if not skip_final_snapshot %} + final_snapshot_identifier = var.final_snapshot_name + {%- endif %} +} diff --git a/lib/aws-ec2/services/redis/variables.j2.tf b/lib/aws-ec2/services/redis/variables.j2.tf new file mode 100644 index 00000000..2383a83e --- /dev/null +++ b/lib/aws-ec2/services/redis/variables.j2.tf @@ -0,0 +1,37 @@ +# elasticache instance basics + +variable "elasticache_identifier" { + description = "Elasticache cluster name (Cluster identifier)" + default = "{{ fqdn_id }}" + type = string +} + +variable "elasticache_version" { + description = "Elasticache version" + default = "{{ version }}" + type = string +} + +variable "parameter_group_name" { + description = "Elasticache parameter group name" + default = "{{ database_elasticache_parameter_group_name }}" + type = string +} + +variable "elasticache_instances_number" { + description = "Elasticache instance numbers" + default = 1 + type = number +} + +variable "port" { + description = "Elasticache instance port" + default = {{ database_port }} + type = number +} + +variable "instance_class" { + description = "Type of instance: https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html" + default = "{{database_instance_type}}" + type = string +} \ No newline at end of file diff --git a/lib/aws/bootstrap-eks/README.md b/lib/aws/bootstrap/README.md similarity index 100% rename from lib/aws/bootstrap-eks/README.md rename to lib/aws/bootstrap/README.md diff --git a/lib/aws/bootstrap-eks/backend.j2.tf b/lib/aws/bootstrap/backend.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/backend.j2.tf rename 
to lib/aws/bootstrap/backend.j2.tf diff --git a/lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml b/lib/aws/bootstrap/chart_values/external-dns.j2.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/external-dns.j2.yaml rename to lib/aws/bootstrap/chart_values/external-dns.j2.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml b/lib/aws/bootstrap/chart_values/grafana.j2.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/grafana.j2.yaml rename to lib/aws/bootstrap/chart_values/grafana.j2.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml b/lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/kube-prometheus-stack.yaml rename to lib/aws/bootstrap/chart_values/kube-prometheus-stack.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/loki.yaml b/lib/aws/bootstrap/chart_values/loki.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/loki.yaml rename to lib/aws/bootstrap/chart_values/loki.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/metrics-server.yaml b/lib/aws/bootstrap/chart_values/metrics-server.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/metrics-server.yaml rename to lib/aws/bootstrap/chart_values/metrics-server.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml b/lib/aws/bootstrap/chart_values/nginx-ingress.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/nginx-ingress.yaml rename to lib/aws/bootstrap/chart_values/nginx-ingress.yaml diff --git a/lib/aws/bootstrap-eks/chart_values/pleco.yaml b/lib/aws/bootstrap/chart_values/pleco.yaml similarity index 100% rename from lib/aws/bootstrap-eks/chart_values/pleco.yaml rename to lib/aws/bootstrap/chart_values/pleco.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/.helmignore b/lib/aws/bootstrap/charts/aws-calico/.helmignore similarity index 
100% rename from lib/aws/bootstrap-eks/charts/aws-calico/.helmignore rename to lib/aws/bootstrap/charts/aws-calico/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/Chart.yaml rename to lib/aws/bootstrap/charts/aws-calico/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/README.md b/lib/aws/bootstrap/charts/aws-calico/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/README.md rename to lib/aws/bootstrap/charts/aws-calico/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml b/lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/crds/crds.yaml rename to lib/aws/bootstrap/charts/aws-calico/crds/crds.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-calico/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/config-map.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/config-map.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/daemon-set.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml similarity index 
100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/deployment.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/pod-disruption-budget.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/podsecuritypolicy.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/rbac.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/service-accounts.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/service-accounts.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/templates/service.yaml rename to lib/aws/bootstrap/charts/aws-calico/templates/service.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-calico/values.yaml b/lib/aws/bootstrap/charts/aws-calico/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-calico/values.yaml rename 
to lib/aws/bootstrap/charts/aws-calico/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore b/lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/.helmignore rename to lib/aws/bootstrap/charts/aws-limits-exporter/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/Chart.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/deployment.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/secrets.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/service.yaml rename to 
lib/aws/bootstrap/charts/aws-limits-exporter/templates/service.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/templates/servicemonitor.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml b/lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-limits-exporter/values.yaml rename to lib/aws/bootstrap/charts/aws-limits-exporter/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/.helmignore rename to lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/Chart.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/README.md rename to 
lib/aws/bootstrap/charts/aws-node-termination-handler/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/daemonset.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/psp.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml diff 
--git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-node-termination-handler/values.yaml rename to lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-ui-view/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml b/lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/Chart.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-ui-view/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml b/lib/aws/bootstrap/charts/aws-ui-view/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-ui-view/values.yaml rename to lib/aws/bootstrap/charts/aws-ui-view/values.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/Chart.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/README.md rename to lib/aws/bootstrap/charts/aws-vpc-cni/README.md diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrole.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/clusterrolebinding.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml similarity index 100% rename from 
lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/configmap.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/customresourcedefinition.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/daemonset.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/eniconfig.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/aws-vpc-cni/values.yaml rename to lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/.helmignore b/lib/aws/bootstrap/charts/coredns-config/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/.helmignore rename to 
lib/aws/bootstrap/charts/coredns-config/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml b/lib/aws/bootstrap/charts/coredns-config/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/Chart.yaml rename to lib/aws/bootstrap/charts/coredns-config/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl b/lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/coredns-config/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml b/lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/templates/configmap.yml rename to lib/aws/bootstrap/charts/coredns-config/templates/configmap.yml diff --git a/lib/aws/bootstrap-eks/charts/coredns-config/values.yaml b/lib/aws/bootstrap/charts/coredns-config/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/coredns-config/values.yaml rename to lib/aws/bootstrap/charts/coredns-config/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore b/lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/.helmignore rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/Chart.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl similarity index 
100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/deployment.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/rbac.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/secret.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/templates/serviceaccount.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml diff --git a/lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml b/lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/iam-eks-user-mapper/values.yaml rename to lib/aws/bootstrap/charts/iam-eks-user-mapper/values.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore b/lib/aws/bootstrap/charts/q-storageclass/.helmignore similarity index 100% rename from 
lib/aws/bootstrap-eks/charts/q-storageclass/.helmignore rename to lib/aws/bootstrap/charts/q-storageclass/.helmignore diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml b/lib/aws/bootstrap/charts/q-storageclass/Chart.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/Chart.yaml rename to lib/aws/bootstrap/charts/q-storageclass/Chart.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl b/lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/templates/_helpers.tpl rename to lib/aws/bootstrap/charts/q-storageclass/templates/_helpers.tpl diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml b/lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/templates/storageclass.yaml rename to lib/aws/bootstrap/charts/q-storageclass/templates/storageclass.yaml diff --git a/lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml b/lib/aws/bootstrap/charts/q-storageclass/values.yaml similarity index 100% rename from lib/aws/bootstrap-eks/charts/q-storageclass/values.yaml rename to lib/aws/bootstrap/charts/q-storageclass/values.yaml diff --git a/lib/aws/bootstrap-eks/documentdb.tf b/lib/aws/bootstrap/documentdb.tf similarity index 100% rename from lib/aws/bootstrap-eks/documentdb.tf rename to lib/aws/bootstrap/documentdb.tf diff --git a/lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf b/lib/aws/bootstrap/eks-ebs-csi-driver.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-ebs-csi-driver.tf rename to lib/aws/bootstrap/eks-ebs-csi-driver.tf diff --git a/lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf b/lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-gen-kubectl-config.j2.tf rename to lib/aws/bootstrap/eks-gen-kubectl-config.j2.tf 
diff --git a/lib/aws/bootstrap-eks/eks-master-cluster.j2.tf b/lib/aws/bootstrap/eks-master-cluster.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-cluster.j2.tf rename to lib/aws/bootstrap/eks-master-cluster.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-master-iam.tf b/lib/aws/bootstrap/eks-master-iam.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-iam.tf rename to lib/aws/bootstrap/eks-master-iam.tf diff --git a/lib/aws/bootstrap-eks/eks-master-sec-group.tf b/lib/aws/bootstrap/eks-master-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-master-sec-group.tf rename to lib/aws/bootstrap/eks-master-sec-group.tf diff --git a/lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf b/lib/aws/bootstrap/eks-s3-kubeconfig-store.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-s3-kubeconfig-store.tf rename to lib/aws/bootstrap/eks-s3-kubeconfig-store.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-common.j2.tf b/lib/aws/bootstrap/eks-vpc-common.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-common.j2.tf rename to lib/aws/bootstrap/eks-vpc-common.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf b/lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-with-nat-gateways.j2.tf rename to lib/aws/bootstrap/eks-vpc-with-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf b/lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-vpc-without-nat-gateways.j2.tf rename to lib/aws/bootstrap/eks-vpc-without-nat-gateways.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-iam.tf b/lib/aws/bootstrap/eks-workers-iam.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-iam.tf rename to lib/aws/bootstrap/eks-workers-iam.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf 
b/lib/aws/bootstrap/eks-workers-nodes.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-nodes.j2.tf rename to lib/aws/bootstrap/eks-workers-nodes.j2.tf diff --git a/lib/aws/bootstrap-eks/eks-workers-sec-group.tf b/lib/aws/bootstrap/eks-workers-sec-group.tf similarity index 100% rename from lib/aws/bootstrap-eks/eks-workers-sec-group.tf rename to lib/aws/bootstrap/eks-workers-sec-group.tf diff --git a/lib/aws/bootstrap-eks/elasticcache.tf b/lib/aws/bootstrap/elasticcache.tf similarity index 100% rename from lib/aws/bootstrap-eks/elasticcache.tf rename to lib/aws/bootstrap/elasticcache.tf diff --git a/lib/aws/bootstrap-eks/elasticsearch.tf b/lib/aws/bootstrap/elasticsearch.tf similarity index 100% rename from lib/aws/bootstrap-eks/elasticsearch.tf rename to lib/aws/bootstrap/elasticsearch.tf diff --git a/lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf b/lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-aws-iam-eks-user-mapper.tf rename to lib/aws/bootstrap/helm-aws-iam-eks-user-mapper.tf diff --git a/lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf b/lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-cluster-autoscaler.j2.tf rename to lib/aws/bootstrap/helm-cluster-autoscaler.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-grafana.j2.tf b/lib/aws/bootstrap/helm-grafana.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-grafana.j2.tf rename to lib/aws/bootstrap/helm-grafana.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-loki.j2.tf b/lib/aws/bootstrap/helm-loki.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-loki.j2.tf rename to lib/aws/bootstrap/helm-loki.j2.tf diff --git a/lib/aws/bootstrap-eks/helm-nginx-ingress.tf b/lib/aws/bootstrap/helm-nginx-ingress.tf similarity index 100% rename from lib/aws/bootstrap-eks/helm-nginx-ingress.tf rename to lib/aws/bootstrap/helm-nginx-ingress.tf 
diff --git a/lib/aws/bootstrap-eks/helper.j2.sh b/lib/aws/bootstrap/helper.j2.sh similarity index 100% rename from lib/aws/bootstrap-eks/helper.j2.sh rename to lib/aws/bootstrap/helper.j2.sh diff --git a/lib/aws/bootstrap-eks/qovery-tf-config.j2.tf b/lib/aws/bootstrap/qovery-tf-config.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/qovery-tf-config.j2.tf rename to lib/aws/bootstrap/qovery-tf-config.j2.tf diff --git a/lib/aws/bootstrap-eks/qovery-vault.j2.tf b/lib/aws/bootstrap/qovery-vault.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/qovery-vault.j2.tf rename to lib/aws/bootstrap/qovery-vault.j2.tf diff --git a/lib/aws/bootstrap-eks/rds.tf b/lib/aws/bootstrap/rds.tf similarity index 100% rename from lib/aws/bootstrap-eks/rds.tf rename to lib/aws/bootstrap/rds.tf diff --git a/lib/aws/bootstrap-eks/s3-qovery-buckets.tf b/lib/aws/bootstrap/s3-qovery-buckets.tf similarity index 100% rename from lib/aws/bootstrap-eks/s3-qovery-buckets.tf rename to lib/aws/bootstrap/s3-qovery-buckets.tf diff --git a/lib/aws/bootstrap-eks/tf-default-vars.j2.tf b/lib/aws/bootstrap/tf-default-vars.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/tf-default-vars.j2.tf rename to lib/aws/bootstrap/tf-default-vars.j2.tf diff --git a/lib/aws/bootstrap-eks/tf-providers-aws.j2.tf b/lib/aws/bootstrap/tf-providers-aws.j2.tf similarity index 100% rename from lib/aws/bootstrap-eks/tf-providers-aws.j2.tf rename to lib/aws/bootstrap/tf-providers-aws.j2.tf diff --git a/lib/edge/aws/backend.j2.tf b/lib/edge/aws/backend.j2.tf deleted file mode 100644 index a1418800..00000000 --- a/lib/edge/aws/backend.j2.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - backend "s3" { - access_key = "{{ aws_access_key_tfstates_account }}" - secret_key = "{{ aws_secret_key_tfstates_account }}" - bucket = "{{ aws_terraform_backend_bucket }}" - key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate" - dynamodb_table = "{{ aws_terraform_backend_dynamodb_table 
}}" - region = "{{ aws_region_tfstates_account }}" - } -} diff --git a/lib/edge/aws/documentdb.tf b/lib/edge/aws/documentdb.tf deleted file mode 100644 index ea04fec0..00000000 --- a/lib/edge/aws/documentdb.tf +++ /dev/null @@ -1,81 +0,0 @@ -locals { - tags_documentdb = merge( - aws_eks_cluster.eks_cluster.tags, - { - "Service" = "DocumentDB" - } - ) -} - -# Network - -resource "aws_subnet" "documentdb_zone_a" { - count = length(var.documentdb_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.documentdb_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_subnet" "documentdb_zone_b" { - count = length(var.documentdb_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.documentdb_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_subnet" "documentdb_zone_c" { - count = length(var.documentdb_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.documentdb_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_documentdb -} - -resource "aws_route_table_association" "documentdb_cluster_zone_a" { - count = length(var.documentdb_subnets_zone_a) - - subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "documentdb_cluster_zone_b" { - count = length(var.documentdb_subnets_zone_b) - - subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "documentdb_cluster_zone_c" { - count = length(var.documentdb_subnets_zone_c) - - subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_docdb_subnet_group" "documentdb" { - description = "DocumentDB linked to 
${var.kubernetes_cluster_id}" - name = "documentdb-${aws_vpc.eks.id}" - subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) - - tags = local.tags_documentdb -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "documentdb_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow DocumentDB incoming access from anywhere" - from_port = 27017 - protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id - to_port = 27017 - type = "ingress" -} diff --git a/lib/edge/aws/eks-vpc-common.j2.tf b/lib/edge/aws/eks-vpc-common.j2.tf deleted file mode 100644 index 63b91880..00000000 --- a/lib/edge/aws/eks-vpc-common.j2.tf +++ /dev/null @@ -1,42 +0,0 @@ -data "aws_availability_zones" "available" {} - -locals { - tags_eks_vpc = merge( - local.tags_common, - { - Name = "qovery-eks-workers", - "kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared", - "kubernetes.io/role/elb" = 1, - {% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %} - } - ) - - tags_eks_vpc_public = merge( - local.tags_eks_vpc, - { - "Public" = "true" - } - ) - - tags_eks_vpc_private = merge( - local.tags_eks, - { - "Public" = "false" - } - ) -} - -# VPC -resource "aws_vpc" "eks" { - cidr_block = var.vpc_cidr_block - enable_dns_hostnames = true - - tags = local.tags_eks_vpc -} - -# Internet gateway -resource "aws_internet_gateway" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - tags = local.tags_eks_vpc -} \ No newline at end of file diff --git a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf b/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf deleted file mode 100644 index d0174308..00000000 --- a/lib/edge/aws/eks-vpc-without-nat-gateways.j2.tf +++ /dev/null @@ -1,75 +0,0 @@ -{% if vpc_qovery_network_mode == "WithoutNatGateways" %} -# Public subnets -resource "aws_subnet" "eks_zone_a" { - count = 
length(var.eks_subnets_zone_a_private) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.eks_subnets_zone_a_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.eks_subnets_zone_b_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_subnet" "eks_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.eks_subnets_zone_c_private[count.index] - vpc_id = aws_vpc.eks.id - map_public_ip_on_launch = true - - tags = local.tags_eks_vpc -} - -resource "aws_route_table" "eks_cluster" { - vpc_id = aws_vpc.eks.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.eks_cluster.id - } - - // todo(pmavro): add tests for it when it will be available in the SDK - {% for route in vpc_custom_routing_table %} - route { - cidr_block = "{{ route.destination }}" - gateway_id = "{{ route.target }}" - } - {% endfor %} - - tags = local.tags_eks_vpc -} - -resource "aws_route_table_association" "eks_cluster_zone_a" { - count = length(var.eks_subnets_zone_a_private) - - subnet_id = aws_subnet.eks_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_b" { - count = length(var.eks_subnets_zone_b_private) - - subnet_id = aws_subnet.eks_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "eks_cluster_zone_c" { - count = length(var.eks_subnets_zone_c_private) - - subnet_id = aws_subnet.eks_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} -{% endif %} \ No newline at end of file diff --git 
a/lib/edge/aws/elasticcache.tf b/lib/edge/aws/elasticcache.tf deleted file mode 100644 index 44073c63..00000000 --- a/lib/edge/aws/elasticcache.tf +++ /dev/null @@ -1,80 +0,0 @@ -locals { - tags_elasticache = merge( - aws_eks_cluster.eks_cluster.tags, - { - "Service" = "Elasticache" - } - ) -} - -# Network - -resource "aws_subnet" "elasticache_zone_a" { - count = length(var.elasticache_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.elasticache_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_subnet" "elasticache_zone_b" { - count = length(var.elasticache_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.elasticache_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_subnet" "elasticache_zone_c" { - count = length(var.elasticache_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.elasticache_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticache -} - -resource "aws_route_table_association" "elasticache_cluster_zone_a" { - count = length(var.elasticache_subnets_zone_a) - - subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticache_cluster_zone_b" { - count = length(var.elasticache_subnets_zone_b) - - subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticache_cluster_zone_c" { - count = length(var.elasticache_subnets_zone_c) - - subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_elasticache_subnet_group" "elasticache" { - description = "Elasticache linked to ${var.kubernetes_cluster_id}" - # WARNING: this "name" 
value is used into elasticache clusters, you need to update it accordingly - name = "elasticache-${aws_vpc.eks.id}" - subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "elasticache_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow Redis incoming access from anywhere" - from_port = 6379 - protocol = "tcp" - security_group_id = aws_security_group.eks_cluster_workers.id - to_port = 6379 - type = "ingress" -} diff --git a/lib/edge/aws/elasticsearch.tf b/lib/edge/aws/elasticsearch.tf deleted file mode 100644 index f5e873dd..00000000 --- a/lib/edge/aws/elasticsearch.tf +++ /dev/null @@ -1,79 +0,0 @@ -locals { - tags_elasticsearch = merge( - local.tags_eks, - { - "Service" = "Elasticsearch" - } - ) -} - -# Network - -resource "aws_subnet" "elasticsearch_zone_a" { - count = length(var.elasticsearch_subnets_zone_a) - - availability_zone = var.aws_availability_zones[0] - cidr_block = var.elasticsearch_subnets_zone_a[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_subnet" "elasticsearch_zone_b" { - count = length(var.elasticsearch_subnets_zone_b) - - availability_zone = var.aws_availability_zones[1] - cidr_block = var.elasticsearch_subnets_zone_b[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_subnet" "elasticsearch_zone_c" { - count = length(var.elasticsearch_subnets_zone_c) - - availability_zone = var.aws_availability_zones[2] - cidr_block = var.elasticsearch_subnets_zone_c[count.index] - vpc_id = aws_vpc.eks.id - - tags = local.tags_elasticsearch -} - -resource "aws_route_table_association" "elasticsearch_cluster_zone_a" { - count = length(var.elasticsearch_subnets_zone_a) - - subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - 
-resource "aws_route_table_association" "elasticsearch_cluster_zone_b" { - count = length(var.elasticsearch_subnets_zone_b) - - subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_route_table_association" "elasticsearch_cluster_zone_c" { - count = length(var.elasticsearch_subnets_zone_c) - - subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index] - route_table_id = aws_route_table.eks_cluster.id -} - -resource "aws_security_group" "elasticsearch" { - name = "elasticsearch-${var.kubernetes_cluster_id}" - description = "Elasticsearch security group" - vpc_id = aws_vpc.eks.id - - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - - cidr_blocks = [ - aws_vpc.eks.cidr_block - ] - } - - tags = local.tags_elasticsearch -} diff --git a/lib/edge/aws/qovery-vault.j2.tf b/lib/edge/aws/qovery-vault.j2.tf deleted file mode 100644 index b12afa38..00000000 --- a/lib/edge/aws/qovery-vault.j2.tf +++ /dev/null @@ -1,29 +0,0 @@ -locals { - kubeconfig_base64 = base64encode(local.kubeconfig) -} -// do not run for tests clusters to avoid uncleaned info. 
-// do not try to use count into resource, it will fails trying to connect to vault -{% if vault_auth_method != "none" and not test_cluster %} -resource "vault_generic_secret" "cluster-access" { - path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}" - - data_json = <, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + template_directory: String, + options: Options, + listeners: Listeners, + logger: Box, +} + +impl EC2 { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + logger: Box, + ) -> Result { + let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws-ec2/bootstrap", context.lib_root_dir()); + + let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; + let s3 = kubernetes::s3(&context, ®ion, &**cloud_provider); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EC2 { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + template_directory, + logger, + listeners, + }) + } + + fn cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } +} + +impl Kubernetes for EC2 { + fn context(&self) -> &Context { + &self.context + } + + fn kind(&self) -> Kind { + Kind::Ec2 + } + + fn id(&self) -> &str { + self.id.as_str() + } + + fn name(&self) -> &str { + self.name.as_str() + } + + fn version(&self) -> &str { + self.version.as_str() + } + + fn region(&self) -> String { + self.region.to_aws_format() + } + + fn zone(&self) -> &str { + "" + } + + fn aws_zones(&self) -> Option> { + Some(self.zones.clone()) + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + 
(*self.cloud_provider).borrow() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + (*self.dns_provider).borrow() + } + + fn logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + &self.s3 + } + + fn is_valid(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_create(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + kubernetes::create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &vec![], + &self.options, + ) + }) + } + + #[named] + fn on_create_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::create_error(self)) + } + + fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + // TODO + Ok(()) + } + + #[named] + fn on_upgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) + } + + #[named] + fn on_upgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + 
self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::upgrade_error(self)) + } + + #[named] + fn on_downgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, kubernetes::downgrade) + } + + #[named] + fn on_downgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::downgrade_error(self)) + } + + #[named] + fn on_pause(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || { + kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) + } + + #[named] + fn on_pause_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || kubernetes::pause_error(self)) + } + + #[named] + fn on_delete(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + 
self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + }) + } + + #[named] + fn on_delete_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || kubernetes::delete_error(self)) + } + + #[named] + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::pause_environment(self, environment, 
event_details, self.logger()) + } + + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } + + #[named] + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::delete_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } +} + +impl Listen for EC2 { + fn listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/aws/kubernetes/eks.rs b/src/cloud_provider/aws/kubernetes/eks.rs new file mode 100644 index 00000000..004923db --- /dev/null +++ b/src/cloud_provider/aws/kubernetes/eks.rs @@ -0,0 +1,670 @@ +use crate::cloud_provider; +use crate::cloud_provider::aws::kubernetes; +use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType; +use crate::cloud_provider::aws::kubernetes::Options; +use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; +use crate::cloud_provider::environment::Environment; +use crate::cloud_provider::kubernetes::{ + send_progress_on_long_task, Kind, Kubernetes, 
KubernetesNodesType, KubernetesUpgradeStatus, +}; +use crate::cloud_provider::models::NodeGroups; +use crate::cloud_provider::utilities::print_action; +use crate::cloud_provider::CloudProvider; +use crate::cmd::kubectl::{kubectl_exec_scale_replicas, ScalingKind}; +use crate::cmd::terraform::terraform_init_validate_plan_apply; +use crate::dns_provider::DnsProvider; +use crate::errors::EngineError; +use crate::events::Stage::Infrastructure; +use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage}; +use crate::io_models::{Action, Context, Listen, Listener, Listeners, ListenersHelper}; +use crate::logger::Logger; +use crate::object_storage::s3::S3; +use crate::object_storage::ObjectStorage; +use function_name::named; +use std::borrow::Borrow; +use std::str::FromStr; +use std::sync::Arc; + +pub struct EKS { + context: Context, + id: String, + long_id: uuid::Uuid, + name: String, + version: String, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + s3: S3, + nodes_groups: Vec, + template_directory: String, + options: Options, + listeners: Listeners, + logger: Box, +} + +impl EKS { + pub fn new( + context: Context, + id: &str, + long_id: uuid::Uuid, + name: &str, + version: &str, + region: AwsRegion, + zones: Vec, + cloud_provider: Arc>, + dns_provider: Arc>, + options: Options, + nodes_groups: Vec, + logger: Box, + ) -> Result { + let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); + + let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; + + for node_group in &nodes_groups { + if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { + let err = + EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); + + logger.log(EngineEvent::Error(err.clone(), None)); + + return Err(err); + } + } + + let s3 = 
kubernetes::s3(&context, ®ion, &**cloud_provider); + + // copy listeners from CloudProvider + let listeners = cloud_provider.listeners().clone(); + Ok(EKS { + context, + id: id.to_string(), + long_id, + name: name.to_string(), + version: version.to_string(), + region, + zones: aws_zones, + cloud_provider, + dns_provider, + s3, + options, + nodes_groups, + template_directory, + logger, + listeners, + }) + } + + fn set_cluster_autoscaler_replicas( + &self, + event_details: EventDetails, + replicas_count: u32, + ) -> Result<(), EngineError> { + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), + )); + let (kubeconfig_path, _) = self.get_kubeconfig_file()?; + let selector = "cluster-autoscaler-aws-cluster-autoscaler"; + let namespace = "kube-system"; + let _ = kubectl_exec_scale_replicas( + kubeconfig_path, + self.cloud_provider().credentials_environment_variables(), + namespace, + ScalingKind::Deployment, + selector, + replicas_count, + ) + .map_err(|e| { + EngineError::new_k8s_scale_replicas( + event_details.clone(), + selector.to_string(), + namespace.to_string(), + replicas_count, + e, + ) + })?; + + Ok(()) + } + + fn cloud_provider_name(&self) -> &str { + "aws" + } + + fn struct_name(&self) -> &str { + "kubernetes" + } +} + +impl Kubernetes for EKS { + fn context(&self) -> &Context { + &self.context + } + + fn kind(&self) -> Kind { + Kind::Eks + } + + fn id(&self) -> &str { + self.id.as_str() + } + + fn name(&self) -> &str { + self.name.as_str() + } + + fn version(&self) -> &str { + self.version.as_str() + } + + fn region(&self) -> String { + self.region.to_aws_format() + } + + fn zone(&self) -> &str { + "" + } + + fn aws_zones(&self) -> Option> { + Some(self.zones.clone()) + } + + fn cloud_provider(&self) -> &dyn CloudProvider { + (*self.cloud_provider).borrow() + } + + fn dns_provider(&self) -> &dyn DnsProvider { + (*self.dns_provider).borrow() + } + + fn 
logger(&self) -> &dyn Logger { + self.logger.borrow() + } + + fn config_file_store(&self) -> &dyn ObjectStorage { + &self.s3 + } + + fn is_valid(&self) -> Result<(), EngineError> { + Ok(()) + } + + #[named] + fn on_create(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || { + kubernetes::create( + self, + self.long_id, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_create_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::create_error(self)) + } + + fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { + let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); + let listeners_helper = ListenersHelper::new(&self.listeners); + + self.send_to_customer( + format!( + "Start preparing EKS upgrade process {} cluster with id {}", + self.name(), + self.id() + ) + .as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), + )); + + let temp_dir = self.get_temp_dir(event_details.clone())?; + + // generate terraform files and copy them into temp dir + let mut context = kubernetes::tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; + + // + // Upgrade master nodes + // + match 
&kubernetes_upgrade_status.required_upgrade_on { + Some(KubernetesNodesType::Masters) => { + self.send_to_customer( + format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), + )); + + // AWS requires the upgrade to be done in 2 steps (masters, then workers) + // use the current kubernetes masters' version for workers, in order to avoid migration in one step + context.insert( + "kubernetes_master_version", + format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), + ); + // use the current master version for workers, they will be updated later + context.insert( + "eks_workers_version", + format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(), + ); + + if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( + self.template_directory.as_str(), + temp_dir.as_str(), + context.clone(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + self.template_directory.to_string(), + temp_dir, + e, + )); + } + + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); + if let Err(e) = crate::template::copy_non_template_files( + common_bootstrap_charts.as_str(), + common_charts_temp_dir.as_str(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + common_bootstrap_charts, + common_charts_temp_dir, + e, + )); + } + + self.send_to_customer( + format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), + )); + + match 
terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { + Ok(_) => { + self.send_to_customer( + format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes master nodes have been successfully upgraded.".to_string(), + ), + )); + } + Err(e) => { + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + } + } + Some(KubernetesNodesType::Workers) => { + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), + ), + )); + } + None => { + self.logger().log(EngineEvent::Info( + event_details, + EventMessage::new_from_safe( + "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), + ), + )); + return Ok(()); + } + } + + if let Err(e) = self.delete_crashlooping_pods( + None, + None, + Some(3), + self.cloud_provider().credentials_environment_variables(), + Stage::Infrastructure(InfrastructureStep::Upgrade), + ) { + self.logger().log(EngineEvent::Error(e.clone(), None)); + return Err(e); + } + + // + // Upgrade worker nodes + // + self.send_to_customer( + format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes cluster.".to_string()), + )); + + // disable cluster autoscaler to avoid interfering with AWS upgrade procedure + context.insert("enable_cluster_autoscaler", &false); + context.insert( + "eks_workers_version", + format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), + ); + + if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( + 
self.template_directory.as_str(), + temp_dir.as_str(), + context.clone(), + ) { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + self.template_directory.to_string(), + temp_dir, + e, + )); + } + + // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. + // this is due to the required dependencies of lib/aws/bootstrap/*.tf files + let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); + let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); + if let Err(e) = + crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) + { + return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( + event_details, + common_bootstrap_charts, + common_charts_temp_dir, + e, + )); + } + + self.send_to_customer( + format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), + )); + + // Disable cluster autoscaler deployment + let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; + + match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { + Ok(_) => { + self.send_to_customer( + format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), + &listeners_helper, + ); + self.logger().log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe( + "Kubernetes workers nodes have been successfully upgraded.".to_string(), + ), + )); + } + Err(e) => { + // enable cluster autoscaler deployment + let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; + + return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); + } + } + + // 
enable cluster autoscaler deployment + self.set_cluster_autoscaler_replicas(event_details, 1) + } + + #[named] + fn on_upgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || self.upgrade()) + } + + #[named] + fn on_upgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::upgrade_error(self)) + } + + #[named] + fn on_downgrade(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, kubernetes::downgrade) + } + + #[named] + fn on_downgrade_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Create, || kubernetes::downgrade_error(self)) + } + + #[named] + fn on_pause(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + 
send_progress_on_long_task(self, Action::Pause, || { + kubernetes::pause( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_pause_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Pause, || kubernetes::pause_error(self)) + } + + #[named] + fn on_delete(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || { + kubernetes::delete( + self, + self.template_directory.as_str(), + &self.zones, + &self.nodes_groups, + &self.options, + ) + }) + } + + #[named] + fn on_delete_error(&self) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + send_progress_on_long_task(self, Action::Delete, || kubernetes::delete_error(self)) + } + + #[named] + fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn deploy_environment_error(&self, environment: &Environment) -> 
Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::pause_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } + + #[named] + fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details.clone(), + self.logger(), + ); + cloud_provider::kubernetes::delete_environment(self, environment, event_details, self.logger()) + } + + #[named] + fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { + let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); + print_action( + self.cloud_provider_name(), + self.struct_name(), + function_name!(), + self.name(), + event_details, + self.logger(), + ); + Ok(()) + } +} + +impl Listen for EKS { + fn 
listeners(&self) -> &Listeners { + &self.listeners + } + + fn add_listener(&mut self, listener: Listener) { + self.listeners.push(listener); + } +} diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 345470d8..09df99cb 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -1,9 +1,6 @@ use core::fmt; -use std::borrow::Borrow; use std::env; use std::path::Path; -use std::str::FromStr; -use std::sync::Arc; use retry::delay::{Fibonacci, Fixed}; use retry::Error::Operation; @@ -12,42 +9,30 @@ use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites}; -use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType; use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create; use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; -use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo}; use crate::cloud_provider::kubernetes::{ - is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes, - KubernetesNodesType, KubernetesUpgradeStatus, ProviderOptions, + is_kubernetes_upgrade_required, uninstall_cert_manager, Kubernetes, ProviderOptions, }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; -use crate::cloud_provider::utilities::print_action; -use crate::cloud_provider::{kubernetes, CloudProvider}; +use crate::cloud_provider::CloudProvider; use crate::cmd; use crate::cmd::helm::{to_engine_error, Helm}; -use crate::cmd::kubectl::{ - kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, - kubectl_exec_scale_replicas, ScalingKind, -}; +use crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, 
kubectl_exec_get_events}; use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider; use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; -use crate::events::Stage::Infrastructure; -use crate::events::{EngineEvent, EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; -use crate::io_models::{ - Action, Context, Features, Listen, Listener, Listeners, ListenersHelper, QoveryIdentifier, ToHelmString, - ToTerraformString, -}; -use crate::logger::Logger; +use crate::events::{EngineEvent, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; +use crate::io_models::{Context, Features, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString}; use crate::object_storage::s3::S3; -use crate::object_storage::ObjectStorage; use crate::string::terraform_list_format; -use ::function_name::named; +pub mod ec2; +pub mod eks; pub mod helm_charts; pub mod node; pub mod roles; @@ -122,1022 +107,6 @@ pub struct Options { impl ProviderOptions for Options {} -pub struct EKS { - context: Context, - id: String, - long_id: uuid::Uuid, - name: String, - version: String, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - s3: S3, - nodes_groups: Vec, - template_directory: String, - options: Options, - listeners: Listeners, - logger: Box, -} - -impl EKS { - pub fn new( - context: Context, - id: &str, - long_id: uuid::Uuid, - name: &str, - version: &str, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - options: Options, - nodes_groups: Vec, - logger: Box, - ) -> Result { - let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap-eks", context.lib_root_dir()); - - let 
aws_zones = aws_zones(zones, ®ion, &event_details)?; - - for node_group in &nodes_groups { - if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) { - let err = - EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e); - - logger.log(EngineEvent::Error(err.clone(), None)); - - return Err(err); - } - } - - let s3 = s3(&context, ®ion, &**cloud_provider); - - // copy listeners from CloudProvider - let listeners = cloud_provider.listeners().clone(); - Ok(EKS { - context, - id: id.to_string(), - long_id, - name: name.to_string(), - version: version.to_string(), - region, - zones: aws_zones, - cloud_provider, - dns_provider, - s3, - options, - nodes_groups, - template_directory, - logger, - listeners, - }) - } - - fn set_cluster_autoscaler_replicas( - &self, - event_details: EventDetails, - replicas_count: u32, - ) -> Result<(), EngineError> { - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("Scaling cluster autoscaler to `{}`.", replicas_count)), - )); - let (kubeconfig_path, _) = self.get_kubeconfig_file()?; - let selector = "cluster-autoscaler-aws-cluster-autoscaler"; - let namespace = "kube-system"; - let _ = kubectl_exec_scale_replicas( - kubeconfig_path, - self.cloud_provider().credentials_environment_variables(), - namespace, - ScalingKind::Deployment, - selector, - replicas_count, - ) - .map_err(|e| { - EngineError::new_k8s_scale_replicas( - event_details.clone(), - selector.to_string(), - namespace.to_string(), - replicas_count, - e, - ) - })?; - - Ok(()) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "kubernetes" - } -} - -impl Kubernetes for EKS { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::Eks - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn version(&self) -> &str { - 
self.version.as_str() - } - - fn region(&self) -> String { - self.region.to_aws_format() - } - - fn zone(&self) -> &str { - "" - } - - fn aws_zones(&self) -> Option> { - Some(self.zones.clone()) - } - - fn cloud_provider(&self) -> &dyn CloudProvider { - (*self.cloud_provider).borrow() - } - - fn dns_provider(&self) -> &dyn DnsProvider { - (*self.dns_provider).borrow() - } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } - - fn config_file_store(&self) -> &dyn ObjectStorage { - &self.s3 - } - - fn is_valid(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || { - create( - self, - self.long_id, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || create_error(self)) - } - - fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - let event_details = self.get_event_details(Infrastructure(InfrastructureStep::Upgrade)); - let listeners_helper = ListenersHelper::new(&self.listeners); - - self.send_to_customer( - format!( - "Start preparing EKS upgrade process {} cluster with id {}", - self.name(), - self.id() - ) - .as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - 
EventMessage::new_from_safe("Start preparing EKS cluster upgrade process".to_string()), - )); - - let temp_dir = self.get_temp_dir(event_details.clone())?; - - // generate terraform files and copy them into temp dir - let mut context = tera_context(self, &self.zones, &self.nodes_groups, &self.options)?; - - // - // Upgrade master nodes - // - match &kubernetes_upgrade_status.required_upgrade_on { - Some(KubernetesNodesType::Masters) => { - self.send_to_customer( - format!("Start upgrading process for master nodes on {}/{}", self.name(), self.id()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Start upgrading process for master nodes.".to_string()), - )); - - // AWS requires the upgrade to be done in 2 steps (masters, then workers) - // use the current kubernetes masters' version for workers, in order to avoid migration in one step - context.insert( - "kubernetes_master_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); - // use the current master version for workers, they will be updated later - context.insert( - "eks_workers_version", - format!("{}", &kubernetes_upgrade_status.deployed_masters_version).as_str(), - ); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context.clone(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - if let Err(e) = crate::template::copy_non_template_files( - common_bootstrap_charts.as_str(), - common_charts_temp_dir.as_str(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - 
common_bootstrap_charts, - common_charts_temp_dir, - e, - )); - } - - self.send_to_customer( - format!("Upgrading Kubernetes {} master nodes", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes master nodes.".to_string()), - )); - - match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes {} master nodes have been successfully upgraded", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes master nodes have been successfully upgraded.".to_string(), - ), - )); - } - Err(e) => { - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - } - } - Some(KubernetesNodesType::Workers) => { - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe( - "No need to perform Kubernetes master upgrade, they are already up to date.".to_string(), - ), - )); - } - None => { - self.logger().log(EngineEvent::Info( - event_details, - EventMessage::new_from_safe( - "No Kubernetes upgrade required, masters and workers are already up to date.".to_string(), - ), - )); - return Ok(()); - } - } - - if let Err(e) = self.delete_crashlooping_pods( - None, - None, - Some(3), - self.cloud_provider().credentials_environment_variables(), - Stage::Infrastructure(InfrastructureStep::Upgrade), - ) { - self.logger().log(EngineEvent::Error(e.clone(), None)); - return Err(e); - } - - // - // Upgrade worker nodes - // - self.send_to_customer( - format!("Preparing workers nodes for upgrade for Kubernetes cluster {}", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Preparing workers nodes for upgrade for Kubernetes 
cluster.".to_string()), - )); - - // disable cluster autoscaler to avoid interfering with AWS upgrade procedure - context.insert("enable_cluster_autoscaler", &false); - context.insert( - "eks_workers_version", - format!("{}", &kubernetes_upgrade_status.requested_version).as_str(), - ); - - if let Err(e) = crate::template::generate_and_copy_all_files_into_dir( - self.template_directory.as_str(), - temp_dir.as_str(), - context.clone(), - ) { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - self.template_directory.to_string(), - temp_dir, - e, - )); - } - - // copy lib/common/bootstrap/charts directory (and sub directory) into the lib/aws/bootstrap/common/charts directory. - // this is due to the required dependencies of lib/aws/bootstrap/*.tf files - let common_charts_temp_dir = format!("{}/common/charts", temp_dir.as_str()); - let common_bootstrap_charts = format!("{}/common/bootstrap/charts", self.context.lib_root_dir()); - if let Err(e) = - crate::template::copy_non_template_files(common_bootstrap_charts.as_str(), common_charts_temp_dir.as_str()) - { - return Err(EngineError::new_cannot_copy_files_from_one_directory_to_another( - event_details, - common_bootstrap_charts, - common_charts_temp_dir, - e, - )); - } - - self.send_to_customer( - format!("Upgrading Kubernetes {} worker nodes", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe("Upgrading Kubernetes worker nodes.".to_string()), - )); - - // Disable cluster autoscaler deployment - let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 0)?; - - match terraform_init_validate_plan_apply(temp_dir.as_str(), self.context.is_dry_run_deploy()) { - Ok(_) => { - self.send_to_customer( - format!("Kubernetes {} workers nodes have been successfully upgraded", self.name()).as_str(), - &listeners_helper, - ); - self.logger().log(EngineEvent::Info( - 
event_details.clone(), - EventMessage::new_from_safe( - "Kubernetes workers nodes have been successfully upgraded.".to_string(), - ), - )); - } - Err(e) => { - // enable cluster autoscaler deployment - let _ = self.set_cluster_autoscaler_replicas(event_details.clone(), 1)?; - - return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); - } - } - - // enable cluster autoscaler deployment - self.set_cluster_autoscaler_replicas(event_details, 1) - } - - #[named] - fn on_upgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || self.upgrade()) - } - - #[named] - fn on_upgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) - } - - #[named] - fn on_downgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, downgrade) - } - - #[named] - fn on_downgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || 
downgrade_error(self)) - } - - #[named] - fn on_pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || { - pause( - self, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_pause_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || pause_error(self)) - } - - #[named] - fn on_delete(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || { - delete( - self, - self.template_directory.as_str(), - &self.zones, - &self.nodes_groups, - &self.options, - ) - }) - } - - #[named] - fn on_delete_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || delete_error(self)) - } - - #[named] - fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - 
self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::pause_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - #[named] - fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::delete_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = 
self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for EKS { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - -pub struct EC2 { - context: Context, - id: String, - long_id: uuid::Uuid, - name: String, - version: String, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - s3: S3, - template_directory: String, - options: Options, - listeners: Listeners, - logger: Box, -} - -impl EC2 { - pub fn new( - context: Context, - id: &str, - long_id: uuid::Uuid, - name: &str, - version: &str, - region: AwsRegion, - zones: Vec, - cloud_provider: Arc>, - dns_provider: Arc>, - options: Options, - logger: Box, - ) -> Result { - let event_details = event_details(&cloud_provider, id, name, ®ion, &context); - let template_directory = format!("{}/aws/bootstrap-ec2", context.lib_root_dir()); - - let aws_zones = aws_zones(zones, ®ion, &event_details)?; - let s3 = s3(&context, ®ion, &**cloud_provider); - - // copy listeners from CloudProvider - let listeners = cloud_provider.listeners().clone(); - Ok(EC2 { - context, - id: id.to_string(), - long_id, - name: name.to_string(), - version: version.to_string(), - region, - zones: aws_zones, - cloud_provider, - dns_provider, - s3, - options, - template_directory, - logger, - listeners, - }) - } - - fn cloud_provider_name(&self) -> &str { - "aws" - } - - fn struct_name(&self) -> &str { - "kubernetes" - } -} - -impl Kubernetes for EC2 { - fn context(&self) -> &Context { - &self.context - } - - fn kind(&self) -> Kind { - Kind::Ec2 - } - - fn id(&self) -> &str { - self.id.as_str() - } - - fn name(&self) -> &str { - self.name.as_str() - } - - fn version(&self) -> &str { - self.version.as_str() - } - - fn region(&self) -> 
String { - self.region.to_aws_format() - } - - fn zone(&self) -> &str { - "" - } - - fn aws_zones(&self) -> Option> { - Some(self.zones.clone()) - } - - fn cloud_provider(&self) -> &dyn CloudProvider { - (*self.cloud_provider).borrow() - } - - fn dns_provider(&self) -> &dyn DnsProvider { - (*self.dns_provider).borrow() - } - - fn logger(&self) -> &dyn Logger { - self.logger.borrow() - } - - fn config_file_store(&self) -> &dyn ObjectStorage { - &self.s3 - } - - fn is_valid(&self) -> Result<(), EngineError> { - Ok(()) - } - - #[named] - fn on_create(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || { - create( - self, - self.long_id, - self.template_directory.as_str(), - &self.zones, - &vec![], - &self.options, - ) - }) - } - - #[named] - fn on_create_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || create_error(self)) - } - - fn upgrade_with_status(&self, _kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError> { - // TODO - Ok(()) - } - - #[named] - fn on_upgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || self.upgrade()) - } - - #[named] - fn on_upgrade_error(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Upgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || upgrade_error(self)) - } - - #[named] - fn on_downgrade(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, downgrade) - } - - #[named] - fn on_downgrade_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Downgrade)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Create, || downgrade_error(self)) - } - - #[named] - fn on_pause(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || { - pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) - }) - } - - #[named] - fn on_pause_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Pause, || pause_error(self)) - } - - #[named] - fn on_delete(&self) -> Result<(), EngineError> { - let 
event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || { - delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) - }) - } - - #[named] - fn on_delete_error(&self) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - send_progress_on_long_task(self, Action::Delete, || delete_error(self)) - } - - #[named] - fn deploy_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn deploy_environment_error(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::deploy_environment_error(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - 
kubernetes::pause_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn pause_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Pause)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } - - #[named] - fn delete_environment(&self, environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details.clone(), - self.logger(), - ); - kubernetes::delete_environment(self, environment, event_details, self.logger()) - } - - #[named] - fn delete_environment_error(&self, _environment: &Environment) -> Result<(), EngineError> { - let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete)); - print_action( - self.cloud_provider_name(), - self.struct_name(), - function_name!(), - self.name(), - event_details, - self.logger(), - ); - Ok(()) - } -} - -impl Listen for EC2 { - fn listeners(&self) -> &Listeners { - &self.listeners - } - - fn add_listener(&mut self, listener: Listener) { - self.listeners.push(listener); - } -} - fn event_details>( cloud_provider: &Box, kubernetes_id: S, @@ -1172,7 +141,7 @@ fn aws_zones( region.to_string(), zone, CommandError::new_from_safe_message(e.to_string()), - )) + )); } }; } @@ -1564,7 +533,6 @@ fn create( } }, Err(_) => kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("Kubernetes cluster upgrade not required, config file is not found and cluster have certainly never been deployed before".to_string()))) - }; // create AWS IAM roles @@ -1647,7 +615,7 @@ fn create( event_details, entry.to_string(), e, - )) + )); } } }; @@ -1905,10 +873,10 
@@ fn pause( kubernetes.logger().log(EngineEvent::Info(event_details.clone(), EventMessage::new_from_safe("No current running jobs on the Engine, infrastructure pause is allowed to start".to_string()))); } Err(Operation { error, .. }) => { - return Err(error) + return Err(error); } Err(retry::Error::Internal(msg)) => { - return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))) + return Err(EngineError::new_cannot_pause_cluster_tasks_are_running(event_details, Some(CommandError::new_from_safe_message(msg)))); } } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 8a8e857e..57f77098 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -20,7 +20,9 @@ use crate::utilities::{ FuncTestsSecrets, }; use base64; -use qovery_engine::cloud_provider::aws::kubernetes::{VpcQoveryNetworkMode, EC2, EKS}; +use qovery_engine::cloud_provider::aws::kubernetes::ec2::EC2; +use qovery_engine::cloud_provider::aws::kubernetes::eks::EKS; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; From 302eafc23e871d8bc84797cb96159a2678a9c6f6 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Fri, 22 Apr 2022 23:56:43 +0200 Subject: [PATCH 074/122] wip: fix ec2 tests --- src/cloud_provider/aws/kubernetes/mod.rs | 4 ++++ test_utilities/src/aws.rs | 13 +++++++++++++ .../aws_kubernetes_ec2.rs} | 12 ++++++------ tests/aws/mod.rs | 1 + tests/edge/aws/mod.rs | 1 - tests/edge/mod.rs | 1 - tests/lib.rs | 1 - 7 files changed, 24 insertions(+), 9 deletions(-) rename tests/{edge/aws/edge_aws_kubernetes.rs => aws/aws_kubernetes_ec2.rs} (86%) delete mode 100644 tests/edge/aws/mod.rs delete mode 100644 tests/edge/mod.rs diff --git a/src/cloud_provider/aws/kubernetes/mod.rs 
b/src/cloud_provider/aws/kubernetes/mod.rs index 09df99cb..f24bf34b 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -81,8 +81,10 @@ pub struct Options { pub vpc_qovery_network_mode: VpcQoveryNetworkMode, pub vpc_cidr_block: String, pub eks_cidr_subnet: String, + pub ec2_cidr_subnet: String, pub vpc_custom_routing_table: Vec, pub eks_access_cidr_blocks: Vec, + pub ec2_access_cidr_blocks: Vec, pub rds_cidr_subnet: String, pub documentdb_cidr_subnet: String, pub elasticache_cidr_subnet: String, @@ -438,6 +440,8 @@ fn tera_context( context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private); context.insert("eks_masters_version", &kubernetes.version()); context.insert("eks_workers_version", &kubernetes.version()); + context.insert("ec2_masters_version", &kubernetes.version()); + context.insert("ec2_workers_version", &kubernetes.version()); context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index c6c5c4b1..6c5d7ee2 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -32,6 +32,8 @@ pub const AWS_KUBERNETES_VERSION: &'static str = pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; +pub const K3S_KUBERNETES_MAJOR_VERSION: u8 = 1; +pub const K3S_KUBERNETES_MINOR_VERSION: u8 = 20; pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR { let secrets = FuncTestsSecrets::new(); @@ -208,9 +210,20 @@ impl Cluster for AWS { vpc_qovery_network_mode: VpcQoveryNetworkMode::WithoutNatGateways, vpc_cidr_block: "10.0.0.0/16".to_string(), eks_cidr_subnet: "20".to_string(), + ec2_cidr_subnet: "20".to_string(), vpc_custom_routing_table: vec![], eks_access_cidr_blocks: secrets .EKS_ACCESS_CIDR_BLOCKS + .as_ref() + 
.unwrap() + .replace("\"", "") + .replace("[", "") + .replace("]", "") + .split(",") + .map(|c| c.to_string()) + .collect(), + ec2_access_cidr_blocks: secrets + .EKS_ACCESS_CIDR_BLOCKS // FIXME ? use an EC2_ACCESS_CIDR_BLOCKS? .unwrap() .replace("\"", "") .replace("[", "") diff --git a/tests/edge/aws/edge_aws_kubernetes.rs b/tests/aws/aws_kubernetes_ec2.rs similarity index 86% rename from tests/edge/aws/edge_aws_kubernetes.rs rename to tests/aws/aws_kubernetes_ec2.rs index a46c9368..074abe5f 100644 --- a/tests/edge/aws/edge_aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -1,6 +1,5 @@ extern crate test_utilities; -use self::test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; use ::function_name::named; use qovery_engine::cloud_provider::kubernetes::Kind as KKind; @@ -10,12 +9,13 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithN use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; +use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; #[cfg(feature = "test-aws-infra")] -fn create_and_destroy_edge_aws_cluster( +fn create_and_destroy_aws_ec2_k3s_cluster( region: String, test_type: ClusterTestType, major_boot_version: u8, @@ -55,13 +55,13 @@ fn create_and_destroy_edge_aws_cluster( #[cfg(feature = "test-aws-infra")] #[named] #[test] -fn create_and_destroy_edge_aws_cluster_eu_west_3() { +fn create_and_destroy_aws_ec2_k3s_cluster_eu_west_3() { let region = "eu-west-3".to_string(); - create_and_destroy_eks_cluster( + create_and_destroy_aws_ec2_k3s_cluster( region, ClusterTestType::Classic, - K3S_MAJOR_VERSION, - K3S_MINOR_VERSION, + K3S_KUBERNETES_MAJOR_VERSION, + 
K3S_KUBERNETES_MINOR_VERSION, WithoutNatGateways, function_name!(), ); diff --git a/tests/aws/mod.rs b/tests/aws/mod.rs index ace8ed56..2ead3484 100644 --- a/tests/aws/mod.rs +++ b/tests/aws/mod.rs @@ -1,5 +1,6 @@ mod aws_databases; mod aws_environment; mod aws_kubernetes; +mod aws_kubernetes_ec2; mod aws_s3; mod aws_whole_enchilada; diff --git a/tests/edge/aws/mod.rs b/tests/edge/aws/mod.rs deleted file mode 100644 index 24609250..00000000 --- a/tests/edge/aws/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod edge_aws_kubernetes; diff --git a/tests/edge/mod.rs b/tests/edge/mod.rs deleted file mode 100644 index 827da9e3..00000000 --- a/tests/edge/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod aws; diff --git a/tests/lib.rs b/tests/lib.rs index 18c6bc2d..bbc13eb3 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -3,5 +3,4 @@ extern crate maplit; mod aws; mod digitalocean; -mod edge; mod scaleway; From 5b19a526577928411716e91fd9c898a62ae1d4fc Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Sat, 23 Apr 2022 00:02:24 +0200 Subject: [PATCH 075/122] wip: fix ec2 tests --- src/cloud_provider/aws/kubernetes/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index f24bf34b..b55c48c2 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -286,8 +286,10 @@ fn tera_context( let vpc_cidr_block = options.vpc_cidr_block.clone(); let eks_cloudwatch_log_group = format!("/aws/eks/{}/cluster", kubernetes.id()); let eks_cidr_subnet = options.eks_cidr_subnet.clone(); + let ec2_cidr_subnet = options.ec2_cidr_subnet.clone(); let eks_access_cidr_blocks = format_ips(&options.eks_access_cidr_blocks); + let ec2_access_cidr_blocks = format_ips(&options.ec2_access_cidr_blocks); let qovery_api_url = options.qovery_api_url.clone(); let rds_cidr_subnet = options.rds_cidr_subnet.clone(); @@ -427,6 +429,7 @@ fn tera_context( // AWS - EKS 
context.insert("aws_availability_zones", &aws_zones); context.insert("eks_cidr_subnet", &eks_cidr_subnet); + context.insert("ec2_cidr_subnet", &ec2_cidr_subnet); context.insert("kubernetes_cluster_name", kubernetes.name()); context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); @@ -444,6 +447,7 @@ fn tera_context( context.insert("ec2_workers_version", &kubernetes.version()); context.insert("eks_cloudwatch_log_group", &eks_cloudwatch_log_group); context.insert("eks_access_cidr_blocks", &eks_access_cidr_blocks); + context.insert("ec2_access_cidr_blocks", &ec2_access_cidr_blocks); // AWS - RDS context.insert("rds_cidr_subnet", &rds_cidr_subnet); From 66246db3a5fdcacf40b1bdc693a566dcf9422efe Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 09:37:58 +0200 Subject: [PATCH 076/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index b55c48c2..6edefd76 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -110,7 +110,7 @@ pub struct Options { impl ProviderOptions for Options {} fn event_details>( - cloud_provider: &Box, + cloud_provider: &dyn CloudProvider, kubernetes_id: S, kubernetes_name: S, kubernetes_region: &AwsRegion, @@ -201,7 +201,7 @@ fn managed_dns_resolvers_terraform_format(dns_provider: &dyn DnsProvider) -> Str fn tera_context( kubernetes: &dyn Kubernetes, - zones: &Vec, + zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result { @@ -493,7 +493,7 @@ fn create( kubernetes_long_id: uuid::Uuid, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = 
kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Create)); @@ -755,7 +755,7 @@ fn pause( kubernetes: &dyn Kubernetes, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)); @@ -941,7 +941,7 @@ fn delete( kubernetes: &dyn Kubernetes, template_directory: &str, aws_zones: &Vec, - node_groups: &Vec, + node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { let event_details = kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Delete)); From e2bf484cf27b2c9f090edc7ea1a9a1f2d4fedc84 Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 11:03:42 +0200 Subject: [PATCH 077/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/ec2.rs | 2 +- src/cloud_provider/aws/kubernetes/eks.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2.rs b/src/cloud_provider/aws/kubernetes/ec2.rs index fbbbe2d6..62d0ca45 100644 --- a/src/cloud_provider/aws/kubernetes/ec2.rs +++ b/src/cloud_provider/aws/kubernetes/ec2.rs @@ -48,7 +48,7 @@ impl EC2 { options: Options, logger: Box, ) -> Result { - let event_details = kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let event_details = kubernetes::event_details(&**cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws-ec2/bootstrap", context.lib_root_dir()); let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; diff --git a/src/cloud_provider/aws/kubernetes/eks.rs b/src/cloud_provider/aws/kubernetes/eks.rs index 004923db..e3a78fa3 100644 --- a/src/cloud_provider/aws/kubernetes/eks.rs +++ b/src/cloud_provider/aws/kubernetes/eks.rs @@ -58,7 +58,7 @@ impl EKS { nodes_groups: Vec, logger: Box, ) -> Result { - let event_details = 
kubernetes::event_details(&cloud_provider, id, name, ®ion, &context); + let event_details = kubernetes::event_details(&**cloud_provider, id, name, ®ion, &context); let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir()); let aws_zones = kubernetes::aws_zones(zones, ®ion, &event_details)?; From f0f81000904d3185c002fe5223396eebbc6bc18d Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Tue, 26 Apr 2022 11:14:31 +0200 Subject: [PATCH 078/122] chore: fix linter --- src/cloud_provider/aws/kubernetes/ec2.rs | 6 +++--- src/cloud_provider/aws/kubernetes/mod.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2.rs b/src/cloud_provider/aws/kubernetes/ec2.rs index 62d0ca45..defb2242 100644 --- a/src/cloud_provider/aws/kubernetes/ec2.rs +++ b/src/cloud_provider/aws/kubernetes/ec2.rs @@ -153,7 +153,7 @@ impl Kubernetes for EC2 { self.long_id, self.template_directory.as_str(), &self.zones, - &vec![], + &[], &self.options, ) }) @@ -246,7 +246,7 @@ impl Kubernetes for EC2 { self.logger(), ); send_progress_on_long_task(self, Action::Pause, || { - kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + kubernetes::pause(self, self.template_directory.as_str(), &self.zones, &[], &self.options) }) } @@ -276,7 +276,7 @@ impl Kubernetes for EC2 { self.logger(), ); send_progress_on_long_task(self, Action::Delete, || { - kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &vec![], &self.options) + kubernetes::delete(self, self.template_directory.as_str(), &self.zones, &[], &self.options) }) } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 6edefd76..54b7ab17 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -492,7 +492,7 @@ fn create( kubernetes: &dyn Kubernetes, kubernetes_long_id: uuid::Uuid, template_directory: &str, - aws_zones: &Vec, 
+ aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { @@ -754,7 +754,7 @@ fn downgrade_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { fn pause( kubernetes: &dyn Kubernetes, template_directory: &str, - aws_zones: &Vec, + aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { @@ -940,7 +940,7 @@ fn pause_error(kubernetes: &dyn Kubernetes) -> Result<(), EngineError> { fn delete( kubernetes: &dyn Kubernetes, template_directory: &str, - aws_zones: &Vec, + aws_zones: &[AwsZones], node_groups: &[NodeGroups], options: &Options, ) -> Result<(), EngineError> { From 0856a7b8a45e7337804b5ffb0c5d47f850ed00cf Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Wed, 27 Apr 2022 11:28:11 +0200 Subject: [PATCH 079/122] tests: create tag for AWS EC2 infra tests AWS EC2 infrastructure tests will have a dedicated CI pipeline bucket. Ticket: ENG-1179 --- Cargo.toml | 5 +++-- tests/aws/aws_kubernetes_ec2.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5ae0c0e7..5d48f51b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,9 +101,10 @@ test-scw-managed-services = [] test-all-managed-services = ["test-aws-managed-services", "test-do-managed-services", "test-scw-managed-services"] test-aws-infra = [] +test-aws-infra-ec2 = [] test-do-infra = [] test-scw-infra = [] -test-all-infra = ["test-aws-infra", "test-do-infra", "test-scw-infra"] +test-all-infra = ["test-aws-infra", "test-aws-infra-ec2", "test-do-infra", "test-scw-infra"] test-aws-whole-enchilada = [] test-do-whole-enchilada = [] @@ -111,7 +112,7 @@ test-scw-whole-enchilada = [] test-all-whole-enchilada = ["test-aws-whole-enchilada", "test-do-whole-enchilada", "test-scw-whole-enchilada"] # functionnal tests by provider -test-aws-all = ["test-aws-infra", "test-aws-managed-services", "test-aws-self-hosted", "test-aws-whole-enchilada"] +test-aws-all = ["test-aws-infra", 
"test-aws-infra-ec2", "test-aws-managed-services", "test-aws-self-hosted", "test-aws-whole-enchilada"] test-do-all = ["test-do-infra", "test-do-managed-services", "test-do-self-hosted", "test-do-whole-enchilada"] test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-hosted", "test-scw-whole-enchilada"] diff --git a/tests/aws/aws_kubernetes_ec2.rs b/tests/aws/aws_kubernetes_ec2.rs index 074abe5f..425647c5 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -14,7 +14,7 @@ use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; -#[cfg(feature = "test-aws-infra")] +#[cfg(feature = "test-aws-infra-ec2")] fn create_and_destroy_aws_ec2_k3s_cluster( region: String, test_type: ClusterTestType, @@ -52,7 +52,7 @@ fn create_and_destroy_aws_ec2_k3s_cluster( It is useful to keep 2 clusters deployment tests to run in // to validate there is no name collision (overlaping) */ -#[cfg(feature = "test-aws-infra")] +#[cfg(feature = "test-aws-infra-ec2")] #[named] #[test] fn create_and_destroy_aws_ec2_k3s_cluster_eu_west_3() { From 68810bf05ec905be3aa9be62df7714dddb6c86c0 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Wed, 27 Apr 2022 13:18:44 +0200 Subject: [PATCH 080/122] fix: linter --- test_utilities/src/aws.rs | 10 ++-- .../src/{edge_aws_rs.rs => aws_ec2.rs} | 0 test_utilities/src/common.rs | 49 +++++++++---------- test_utilities/src/digitalocean.rs | 11 ++--- test_utilities/src/lib.rs | 2 +- test_utilities/src/scaleway.rs | 13 ++--- tests/aws/aws_kubernetes_ec2.rs | 4 +- 7 files changed, 39 insertions(+), 50 deletions(-) rename test_utilities/src/{edge_aws_rs.rs => aws_ec2.rs} (100%) diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 6c5d7ee2..5a0ddcac 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -5,10 +5,9 @@ use const_format::formatcp; use 
qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::aws::AWS; -use qovery_engine::cloud_provider::kubernetes::Kind as KKind; +use qovery_engine::cloud_provider::kubernetes::Kind as KubernetesKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide; -use qovery_engine::cloud_provider::Kind::Aws; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::ecr::ECR; use qovery_engine::dns_provider::DnsProvider; @@ -63,7 +62,7 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> &context, logger, AWS_TEST_REGION.to_string().as_str(), - KKind::Eks, + KubernetesKind::Eks, AWS_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, @@ -75,7 +74,7 @@ impl Cluster for AWS { context: &Context, logger: Box, localisation: &str, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -91,14 +90,13 @@ impl Cluster for AWS { let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let kubernetes = get_environment_test_kubernetes( - Aws, context, cloud_provider.clone(), kubernetes_kind, + kubernetes_version.as_str(), dns_provider.clone(), logger.clone(), localisation, - kubernetes_version.as_str(), vpc_network_mode, ); diff --git a/test_utilities/src/edge_aws_rs.rs b/test_utilities/src/aws_ec2.rs similarity index 100% rename from test_utilities/src/edge_aws_rs.rs rename to test_utilities/src/aws_ec2.rs diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 57f77098..2689f2be 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -13,7 +13,6 @@ use qovery_engine::io_models::{ use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION}; use 
crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION}; -use crate::edge_aws_rs::AWS_K3S_VERSION; use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE}; use crate::utilities::{ db_disk_type, db_infos, db_instance_type, generate_id, generate_password, get_pvc, get_svc, get_svc_name, init, @@ -28,7 +27,7 @@ use qovery_engine::cloud_provider::aws::AWS; use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS; use qovery_engine::cloud_provider::digitalocean::DO; use qovery_engine::cloud_provider::environment::Environment; -use qovery_engine::cloud_provider::kubernetes::Kind as KKind; +use qovery_engine::cloud_provider::kubernetes::Kind as KubernetesKind; use qovery_engine::cloud_provider::kubernetes::Kubernetes; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule; @@ -65,7 +64,7 @@ pub trait Cluster { context: &Context, logger: Box, localisation: &str, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -1147,7 +1146,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - KKind::Eks, + KubernetesKind::Eks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1156,7 +1155,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - KKind::Doks, + KubernetesKind::Doks, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1165,7 +1164,7 @@ pub fn test_db( &context, logger.clone(), localisation.as_str(), - KKind::ScwKapsule, + KubernetesKind::ScwKapsule, kubernetes_version.clone(), &ClusterDomain::Default, None, @@ -1230,7 +1229,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), - KKind::Eks, + KubernetesKind::Eks, kubernetes_version, &ClusterDomain::Default, None, @@ -1239,7 +1238,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), - KKind::Doks, + KubernetesKind::Doks, 
kubernetes_version, &ClusterDomain::Default, None, @@ -1248,7 +1247,7 @@ pub fn test_db( &context_for_delete, logger.clone(), localisation.as_str(), - KKind::ScwKapsule, + KubernetesKind::ScwKapsule, kubernetes_version, &ClusterDomain::Default, None, @@ -1262,20 +1261,19 @@ pub fn test_db( } pub fn get_environment_test_kubernetes<'a>( - provider_kind: Kind, context: &Context, cloud_provider: Arc>, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, + kubernetes_version: &str, dns_provider: Arc>, logger: Box, localisation: &str, - kubernetes_version: &str, vpc_network_mode: Option, ) -> Box { let secrets = FuncTestsSecrets::new(); let kubernetes: Box = match kubernetes_kind { - KKind::Eks => { + KubernetesKind::Eks => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); if vpc_network_mode.is_some() { @@ -1300,7 +1298,7 @@ pub fn get_environment_test_kubernetes<'a>( .unwrap(), ) } - KKind::Ec2 => { + KubernetesKind::Ec2 => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); if vpc_network_mode.is_some() { @@ -1324,7 +1322,7 @@ pub fn get_environment_test_kubernetes<'a>( .unwrap(), ) } - KKind::Doks => { + KubernetesKind::Doks => { let region = DoRegion::from_str(localisation).expect("DO region not supported"); Box::new( DOKS::new( @@ -1343,7 +1341,7 @@ pub fn get_environment_test_kubernetes<'a>( .unwrap(), ) } - KKind::ScwKapsule => { + KubernetesKind::ScwKapsule => { let zone = ScwZone::from_str(localisation).expect("SCW zone not supported"); Box::new( Kapsule::new( @@ -1368,7 +1366,6 @@ pub fn get_environment_test_kubernetes<'a>( } pub fn get_cluster_test_kubernetes<'a>( - provider_kind: Kind, secrets: FuncTestsSecrets, context: &Context, cluster_id: String, @@ -1377,13 +1374,13 @@ pub fn get_cluster_test_kubernetes<'a>( localisation: &str, aws_zones: Option>, 
cloud_provider: Arc>, - kubernetes_provider: KKind, + kubernetes_provider: KubernetesKind, dns_provider: Arc>, vpc_network_mode: Option, logger: Box, ) -> Box { let kubernetes: Box = match kubernetes_provider { - KKind::Eks => { + KubernetesKind::Eks => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); if vpc_network_mode.is_some() { @@ -1409,7 +1406,7 @@ pub fn get_cluster_test_kubernetes<'a>( .unwrap(), ) } - KKind::Ec2 => { + KubernetesKind::Ec2 => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); if vpc_network_mode.is_some() { @@ -1434,7 +1431,7 @@ pub fn get_cluster_test_kubernetes<'a>( .unwrap(), ) } - KKind::Doks => Box::new( + KubernetesKind::Doks => Box::new( DOKS::new( context.clone(), cluster_id.clone(), @@ -1450,7 +1447,7 @@ pub fn get_cluster_test_kubernetes<'a>( ) .unwrap(), ), - KKind::ScwKapsule => Box::new( + KubernetesKind::ScwKapsule => Box::new( Kapsule::new( context.clone(), cluster_id.clone(), @@ -1474,7 +1471,7 @@ pub fn get_cluster_test_kubernetes<'a>( pub fn cluster_test( test_name: &str, provider_kind: Kind, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, context: Context, logger: Box, localisation: &str, @@ -1606,7 +1603,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, - KKind::Eks, + KubernetesKind::Eks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1615,7 +1612,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, - KKind::Doks, + KubernetesKind::Doks, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), @@ -1624,7 +1621,7 @@ pub fn cluster_test( &context, logger.clone(), localisation, - KKind::ScwKapsule, + KubernetesKind::ScwKapsule, upgrade_to_version, cluster_domain, vpc_network_mode.clone(), diff --git a/test_utilities/src/digitalocean.rs 
b/test_utilities/src/digitalocean.rs index e81e336f..c2be5aaa 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -3,7 +3,7 @@ use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions; use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind; use qovery_engine::cloud_provider::digitalocean::DO; -use qovery_engine::cloud_provider::kubernetes::Kind as KKind; +use qovery_engine::cloud_provider::kubernetes::Kind as KubernetesKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::docr::DOCR; @@ -14,9 +14,7 @@ use std::sync::Arc; use crate::cloudflare::dns_provider_cloudflare; use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; -use qovery_engine::cloud_provider::kubernetes::Kind; use qovery_engine::cloud_provider::qovery::EngineLocation; -use qovery_engine::cloud_provider::Kind::Do; use qovery_engine::dns_provider::DnsProvider; use qovery_engine::errors::EngineError; use qovery_engine::logger::Logger; @@ -50,7 +48,7 @@ pub fn do_default_engine_config(context: &Context, logger: Box) -> E &context, logger, DO_TEST_REGION.to_string().as_str(), - KKind::Doks, + KubernetesKind::Doks, DO_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, @@ -62,7 +60,7 @@ impl Cluster for DO { context: &Context, logger: Box, localisation: &str, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, @@ -77,14 +75,13 @@ impl Cluster for DO { let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let k = get_environment_test_kubernetes( - Do, context, cloud_provider.clone(), kubernetes_kind, + 
kubernetes_version.as_str(), dns_provider.clone(), logger.clone(), localisation, - kubernetes_version.as_str(), vpc_network_mode, ); diff --git a/test_utilities/src/lib.rs b/test_utilities/src/lib.rs index 14b7316b..0cec3383 100644 --- a/test_utilities/src/lib.rs +++ b/test_utilities/src/lib.rs @@ -4,9 +4,9 @@ extern crate maplit; pub mod aws; +pub mod aws_ec2; pub mod cloudflare; pub mod common; pub mod digitalocean; -pub mod edge_aws_rs; pub mod scaleway; pub mod utilities; diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 4db9b9ff..20d2d5c3 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -5,13 +5,11 @@ use tracing::error; use qovery_engine::build_platform::Build; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; -use qovery_engine::cloud_provider::kubernetes::Kind; -use qovery_engine::cloud_provider::kubernetes::Kind as KKind; +use qovery_engine::cloud_provider::kubernetes::Kind as KubernetesKind; use qovery_engine::cloud_provider::models::NodeGroups; use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::scaleway::kubernetes::KapsuleOptions; use qovery_engine::cloud_provider::scaleway::Scaleway; -use qovery_engine::cloud_provider::Kind::Scw; use qovery_engine::cloud_provider::{CloudProvider, TerraformStateCredentials}; use qovery_engine::container_registry::errors::ContainerRegistryError; use qovery_engine::container_registry::scaleway_container_registry::ScalewayCR; @@ -72,7 +70,7 @@ pub fn scw_default_engine_config(context: &Context, logger: Box) -> &context, logger, SCW_TEST_ZONE.to_string().as_str(), - KKind::ScwKapsule, + KubernetesKind::ScwKapsule, SCW_KUBERNETES_VERSION.to_string(), &ClusterDomain::Default, None, @@ -84,7 +82,7 @@ impl Cluster for Scaleway { context: &Context, logger: Box, localisation: &str, - kubernetes_kind: KKind, + kubernetes_kind: KubernetesKind, kubernetes_version: String, cluster_domain: 
&ClusterDomain, vpc_network_mode: Option, @@ -100,14 +98,13 @@ impl Cluster for Scaleway { let dns_provider: Arc> = Arc::new(dns_provider_cloudflare(context, cluster_domain)); let cluster = get_environment_test_kubernetes( - Scw, context, cloud_provider.clone(), - Kind::ScwKapsule, + kubernetes_kind.clone(), + kubernetes_version.as_str(), dns_provider.clone(), logger.clone(), localisation, - kubernetes_version.as_str(), vpc_network_mode, ); diff --git a/tests/aws/aws_kubernetes_ec2.rs b/tests/aws/aws_kubernetes_ec2.rs index 425647c5..8f60bb8f 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -5,14 +5,14 @@ use ::function_name::named; use qovery_engine::cloud_provider::kubernetes::Kind as KKind; use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode; -use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::{WithNatGateways, WithoutNatGateways}; +use qovery_engine::cloud_provider::aws::kubernetes::VpcQoveryNetworkMode::WithoutNatGateways; use qovery_engine::cloud_provider::aws::regions::AwsRegion; use qovery_engine::cloud_provider::Kind; use std::str::FromStr; use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; -pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; +pub const _AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; #[cfg(feature = "test-aws-infra-ec2")] fn create_and_destroy_aws_ec2_k3s_cluster( From 4567fbf9d56f0ba33b88a89afb8c0ae7b986434b Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Wed, 27 Apr 2022 14:24:01 +0200 Subject: [PATCH 081/122] review: applying review feedbacks --- src/cloud_provider/aws/kubernetes/mod.rs | 37 +++++++++--------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 54b7ab17..4ef90fbe 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ 
b/src/cloud_provider/aws/kubernetes/mod.rs @@ -774,7 +774,7 @@ fn pause( kubernetes.logger().log(EngineEvent::Info( kubernetes.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe(format!("Preparing {} cluster pause.", kubernetes.kind())), + EventMessage::new_from_safe("Preparing cluster pause.".to_string()), )); let temp_dir = kubernetes.get_temp_dir(event_details.clone())?; @@ -911,7 +911,7 @@ fn pause( kubernetes.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Pausing EKS cluster deployment.".to_string()), + EventMessage::new_from_safe("Pausing cluster deployment.".to_string()), )); match terraform_exec(temp_dir.as_str(), terraform_args) { @@ -1149,13 +1149,10 @@ fn delete( event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - kubernetes.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Can't delete chart `{}`", &chart.name), Some(e.to_string())), + )), } } } @@ -1201,23 +1198,17 @@ fn delete( event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - kubernetes.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Error deleting chart `{}`", chart.name), Some(e.to_string())), + )), } } } - Err(e) => { - let message_safe = "Unable to get helm list"; - kubernetes.logger().log(EngineEvent::Warning( - event_details.clone(), - 
EventMessage::new(message_safe.to_string(), Some(e.to_string())), - )) - } + Err(e) => kubernetes.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new("Unable to get helm list".to_string(), Some(e.to_string())), + )), } }; From b8d0923d2ce10ce22435a0cb001c38b54eb23002 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Wed, 27 Apr 2022 16:14:51 +0200 Subject: [PATCH 082/122] comment: flag EC2 k8s provider --- src/cloud_provider/aws/kubernetes/ec2.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cloud_provider/aws/kubernetes/ec2.rs b/src/cloud_provider/aws/kubernetes/ec2.rs index defb2242..60ba9f95 100644 --- a/src/cloud_provider/aws/kubernetes/ec2.rs +++ b/src/cloud_provider/aws/kubernetes/ec2.rs @@ -17,6 +17,7 @@ use function_name::named; use std::borrow::Borrow; use std::sync::Arc; +/// EC2 kubernetes provider allowing to deploy a cluster on single EC2 node. pub struct EC2 { context: Context, id: String, From e01b17b03094bba36a5a1332b7abf82f214514c8 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Thu, 28 Apr 2022 10:28:23 +0200 Subject: [PATCH 083/122] comment: flag EKS k8s provider --- src/cloud_provider/aws/kubernetes/eks.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cloud_provider/aws/kubernetes/eks.rs b/src/cloud_provider/aws/kubernetes/eks.rs index e3a78fa3..4cbe5a18 100644 --- a/src/cloud_provider/aws/kubernetes/eks.rs +++ b/src/cloud_provider/aws/kubernetes/eks.rs @@ -25,6 +25,7 @@ use std::borrow::Borrow; use std::str::FromStr; use std::sync::Arc; +/// EKS kubernetes provider allowing to deploy an EKS cluster. 
pub struct EKS { context: Context, id: String, From 4f2685e8415f3d7cdbd5c0f134ccecb4c9bba95c Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Thu, 28 Apr 2022 11:23:23 +0200 Subject: [PATCH 084/122] fix: prevent error message leak (#700) --- src/cloud_provider/aws/kubernetes/mod.rs | 36 +++++------ .../digitalocean/kubernetes/mod.rs | 59 ++++++++----------- src/cloud_provider/scaleway/kubernetes/mod.rs | 19 +++--- 3 files changed, 52 insertions(+), 62 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 89816f5b..79faf7f3 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -1159,13 +1159,10 @@ impl EKS { event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Can't delete chart `{}`", &chart.name), Some(e.to_string())), + )), } } } @@ -1211,23 +1208,20 @@ impl EKS { event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!("Error deleting chart `{}`", chart.name), + Some(e.to_string()), + ), + )), } } } - Err(e) => { - let message_safe = "Unable to get helm list"; - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - )) - } + Err(e) => 
self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new("Unable to get helm list".to_string(), Some(e.to_string())), + )), } }; diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 4afe105d..d9795030 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -463,7 +463,7 @@ impl DOKS { )); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Preparing DOKS cluster deployment.".to_string()), + EventMessage::new_from_safe("Preparing cluster deployment.".to_string()), )); // upgrade cluster instead if required @@ -533,7 +533,7 @@ impl DOKS { self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Deploying DOKS cluster.".to_string()), + EventMessage::new_from_safe("Deploying cluster.".to_string()), )); self.send_to_customer( format!("Deploying DOKS {} cluster deployment with id {}", self.name(), self.id()).as_str(), @@ -550,7 +550,7 @@ impl DOKS { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { Ok(_) => self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + EventMessage::new_from_safe(format!("Successfully removed {}", &entry)), )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( @@ -802,7 +802,7 @@ impl DOKS { ); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Preparing to delete DOKS cluster.".to_string()), + EventMessage::new_from_safe("Preparing to delete cluster.".to_string()), )); let temp_dir = match self.get_temp_dir(event_details.clone()) { @@ -923,13 +923,12 @@ impl DOKS { } } Err(e) => { - let message_safe = format!( - "Error while getting all namespaces for Kubernetes cluster {}", - self.name_with_id(), - ); self.logger().log(EngineEvent::Warning( 
event_details.clone(), - EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), + EventMessage::new( + "Error while getting all namespaces for Kubernetes cluster".to_string(), + Some(e.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars)), + ), )); } } @@ -976,13 +975,10 @@ impl DOKS { event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Can't delete chart `{}`", chart.name); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new(format!("Can't delete chart `{}`", chart.name), Some(e.to_string())), + )), } } } @@ -1028,23 +1024,20 @@ impl DOKS { event_details.clone(), EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)), )), - Err(e) => { - let message_safe = format!("Error deleting chart `{}`", chart.name); - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe, Some(e.to_string())), - )) - } + Err(e) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new( + format!("Error deleting chart `{}`", chart.name), + Some(e.to_string()), + ), + )), } } } - Err(e) => { - let message_safe = "Unable to get helm list"; - self.logger().log(EngineEvent::Warning( - event_details.clone(), - EventMessage::new(message_safe.to_string(), Some(e.to_string())), - )) - } + Err(e) => self.logger().log(EngineEvent::Warning( + event_details.clone(), + EventMessage::new("Unable to get helm list".to_string(), Some(e.to_string())), + )), } }; @@ -1175,8 +1168,8 @@ impl Kubernetes for DOKS { self.logger().log(EngineEvent::Debug( self.get_event_details(stage), EventMessage::new( - err.to_string(), - Some(format!("Error, couldn't open {} file", &local_kubeconfig_generated,)), + format!("Error, couldn't open 
{} file", &local_kubeconfig_generated), + Some(err.to_string()), ), )); None @@ -1330,7 +1323,7 @@ impl Kubernetes for DOKS { ); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Start preparing DOKS cluster upgrade process".to_string()), + EventMessage::new_from_safe("Start preparing cluster upgrade process".to_string()), )); let temp_dir = self.get_temp_dir(event_details.clone())?; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index b8d9e693..0fec4092 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -674,7 +674,7 @@ impl Kapsule { match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { Ok(_) => self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), + EventMessage::new_from_safe(format!("Successfully removed {}", &entry)), )), Err(e) => { return Err(EngineError::new_terraform_cannot_remove_entry_out( @@ -909,7 +909,7 @@ impl Kapsule { self.logger.log(EngineEvent::Info( event_details.clone(), EventMessage::new_from_safe( - "all node groups for this cluster are ready from cloud provider API".to_string(), + "All node groups for this cluster are ready from cloud provider API".to_string(), ), )); @@ -1005,7 +1005,7 @@ impl Kapsule { event_details, EventMessage::new( "Error trying to get kubernetes events".to_string(), - Some(err.message(ErrorMessageVerbosity::FullDetails)), + Some(err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars)), ), )), }; @@ -1046,7 +1046,7 @@ impl Kapsule { self.logger().log(EngineEvent::Info( self.get_event_details(Stage::Infrastructure(InfrastructureStep::Pause)), - EventMessage::new_from_safe("Preparing SCW cluster pause.".to_string()), + EventMessage::new_from_safe("Preparing cluster pause.".to_string()), )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ 
-1177,7 +1177,7 @@ impl Kapsule { ); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Pausing SCW cluster deployment.".to_string()), + EventMessage::new_from_safe("Pausing cluster deployment.".to_string()), )); match terraform_exec(temp_dir.as_str(), terraform_args) { @@ -1212,7 +1212,7 @@ impl Kapsule { ); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Preparing to delete SCW cluster.".to_string()), + EventMessage::new_from_safe("Preparing to delete cluster.".to_string()), )); let temp_dir = self.get_temp_dir(event_details.clone())?; @@ -1335,7 +1335,10 @@ impl Kapsule { ); self.logger().log(EngineEvent::Warning( event_details.clone(), - EventMessage::new(message_safe, Some(e.message(ErrorMessageVerbosity::FullDetails))), + EventMessage::new( + message_safe, + Some(e.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars)), + ), )); } } @@ -1605,7 +1608,7 @@ impl Kubernetes for Kapsule { ); self.logger().log(EngineEvent::Info( event_details.clone(), - EventMessage::new_from_safe("Start preparing SCW cluster upgrade process".to_string()), + EventMessage::new_from_safe("Start preparing cluster upgrade process".to_string()), )); let temp_dir = self.get_temp_dir(event_details.clone())?; From 6cffa9fc9a0c42aa332cc042cabb4825cf4e670b Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Thu, 28 Apr 2022 16:37:40 +0200 Subject: [PATCH 085/122] fix: images for dbs tests (#701) --- test_utilities/src/utilities.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 93963e8f..9ec311e6 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -939,7 +939,7 @@ pub fn db_infos( DBInfos { db_port: database_port.clone(), db_name: database_db_name.to_string(), - app_commit: "3fdc7e784c1d98b80446be7ff25e35370306d9a8".to_string(), + app_commit: 
"da5dd2b58b78576921373fcb4d4bddc796a804a8".to_string(), app_env_vars: btreemap! { "IS_DOCUMENTDB".to_string() => base64::encode((database_mode == MANAGED).to_string()), "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string() => base64::encode(db_fqdn.clone()), @@ -957,7 +957,7 @@ pub fn db_infos( DBInfos { db_port: database_port.clone(), db_name: database_db_name.to_string(), - app_commit: "fc8a87b39cdee84bb789893fb823e3e62a1999c0".to_string(), + app_commit: "42f6553b6be617f954f903e01236e225bbb9f468".to_string(), app_env_vars: btreemap! { "MYSQL_HOST".to_string() => base64::encode(db_fqdn.clone()), "MYSQL_PORT".to_string() => base64::encode(database_port.to_string()), @@ -977,7 +977,7 @@ pub fn db_infos( DBInfos { db_port: database_port.clone(), db_name: database_db_name.to_string(), - app_commit: "c3eda167df49fa9757f281d6f3655ba46287c61d".to_string(), + app_commit: "61c7a9b55a085229583b6a394dd168a4159dfd09".to_string(), app_env_vars: btreemap! { "PG_DBNAME".to_string() => base64::encode(database_db_name.clone()), "PG_HOST".to_string() => base64::encode(db_fqdn.clone()), @@ -993,7 +993,7 @@ pub fn db_infos( DBInfos { db_port: database_port.clone(), db_name: database_db_name.to_string(), - app_commit: "80ad41fbe9549f8de8dbe2ca4dd5d23e8ffc92de".to_string(), + app_commit: "e4b1162741ce162b834b68498e43bf60f0f58cbe".to_string(), app_env_vars: btreemap! 
{ "IS_ELASTICCACHE".to_string() => base64::encode((database_mode == MANAGED).to_string()), "REDIS_HOST".to_string() => base64::encode(db_fqdn.clone()), From d9d04a93f772e70cda49924247293ec514c8e9f6 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Thu, 28 Apr 2022 17:48:31 +0200 Subject: [PATCH 086/122] fix: setup proper tf values for ec2 --- lib/aws-ec2/bootstrap/documentdb.tf | 10 +++++----- lib/aws-ec2/bootstrap/ec2-sec-group.tf | 16 +++------------- lib/aws-ec2/bootstrap/ec2-vpc.j2.tf | 14 +++++++------- lib/aws-ec2/bootstrap/ec2.j2.tf | 19 ++++++++++++------- lib/aws-ec2/bootstrap/elasticcache.tf | 10 +++++----- lib/aws-ec2/bootstrap/elasticsearch.tf | 6 +++--- lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf | 12 ++++++++++++ lib/aws-ec2/bootstrap/qovery-vault.j2.tf | 11 ++++++----- lib/aws-ec2/bootstrap/rds.tf | 12 ++++++------ lib/aws-ec2/bootstrap/tags.j2.tf | 17 +++++++++++++++++ lib/aws-ec2/services/common/providers.j2.tf | 6 +++--- 11 files changed, 79 insertions(+), 54 deletions(-) create mode 100644 lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf create mode 100644 lib/aws-ec2/bootstrap/tags.j2.tf diff --git a/lib/aws-ec2/bootstrap/documentdb.tf b/lib/aws-ec2/bootstrap/documentdb.tf index 04ca6934..7828faf5 100644 --- a/lib/aws-ec2/bootstrap/documentdb.tf +++ b/lib/aws-ec2/bootstrap/documentdb.tf @@ -1,6 +1,6 @@ locals { tags_documentdb = merge( - aws_ec2_cluster.ec2_cluster.tags, + aws_instance.ec2_instance.tags, { "Service" = "DocumentDB" } @@ -43,21 +43,21 @@ resource "aws_route_table_association" "documentdb_cluster_zone_a" { count = length(var.documentdb_subnets_zone_a) subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index] - route_table_id = aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } resource "aws_route_table_association" "documentdb_cluster_zone_b" { count = length(var.documentdb_subnets_zone_b) subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index] - route_table_id = 
aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } resource "aws_route_table_association" "documentdb_cluster_zone_c" { count = length(var.documentdb_subnets_zone_c) subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index] - route_table_id = aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } resource "aws_docdb_subnet_group" "documentdb" { @@ -75,7 +75,7 @@ resource "aws_security_group_rule" "documentdb_remote_access" { description = "Allow DocumentDB incoming access from anywhere" from_port = 27017 protocol = "tcp" - security_group_id = aws_security_group.ec2_cluster_workers.id + security_group_id = aws_security_group.ec2_instance.id to_port = 27017 type = "ingress" } diff --git a/lib/aws-ec2/bootstrap/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf index a82bd0e9..f8c94814 100644 --- a/lib/aws-ec2/bootstrap/ec2-sec-group.tf +++ b/lib/aws-ec2/bootstrap/ec2-sec-group.tf @@ -1,4 +1,4 @@ -resource "aws_security_group" "ec2_cluster" { +resource "aws_security_group" "ec2_instance" { name = "qovery-ec2-${var.kubernetes_cluster_id}" description = "Cluster communication with worker nodes" vpc_id = aws_vpc.ec2.id @@ -14,21 +14,11 @@ resource "aws_security_group" "ec2_cluster" { } resource "aws_security_group_rule" "https" { - cidr_blocks = "0.0.0.0/0" + cidr_blocks = ["0.0.0.0/0"] description = "HTTPS connectivity" from_port = 443 protocol = "tcp" - security_group_id = aws_security_group.ec2_cluster.id + security_group_id = aws_security_group.ec2_instance.id to_port = 443 type = "ingress" -} - -resource "aws_security_group_rule" "ssh" { - cidr_blocks = "0.0.0.0/0" - description = "SSH remote access" - from_port = 22 - protocol = "tcp" - security_group_id = aws_security_group.ec2_cluster.id - to_port = 22 - type = "ssh" } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf b/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf index 27915b66..fdcaf9fe 100644 --- 
a/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf +++ b/lib/aws-ec2/bootstrap/ec2-vpc.j2.tf @@ -32,7 +32,7 @@ resource "aws_subnet" "ec2_zone_c" { tags = local.tags_ec2_vpc } -resource "aws_route_table" "ec2_cluster" { +resource "aws_route_table" "ec2_instance" { vpc_id = aws_vpc.ec2.id route { @@ -50,23 +50,23 @@ resource "aws_route_table" "ec2_cluster" { tags = local.tags_ec2_vpc } -resource "aws_route_table_association" "ec2_cluster_zone_a" { +resource "aws_route_table_association" "ec2_instance_zone_a" { count = length(var.ec2_subnets_zone_a_private) subnet_id = aws_subnet.ec2_zone_a.*.id[count.index] - route_table_id = aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } -resource "aws_route_table_association" "ec2_cluster_zone_b" { +resource "aws_route_table_association" "ec2_instance_zone_b" { count = length(var.ec2_subnets_zone_b_private) subnet_id = aws_subnet.ec2_zone_b.*.id[count.index] - route_table_id = aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } -resource "aws_route_table_association" "ec2_cluster_zone_c" { +resource "aws_route_table_association" "ec2_instance_zone_c" { count = length(var.ec2_subnets_zone_c_private) subnet_id = aws_subnet.ec2_zone_c.*.id[count.index] - route_table_id = aws_route_table.ec2_cluster.id + route_table_id = aws_route_table.ec2_instance.id } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf index 2a9bb030..756276ef 100644 --- a/lib/aws-ec2/bootstrap/ec2.j2.tf +++ b/lib/aws-ec2/bootstrap/ec2.j2.tf @@ -16,7 +16,7 @@ data "aws_ami" "debian" { owners = [var.ec2_image_info.owners] } -resource "aws_instance" "web" { +resource "aws_instance" "ec2_instance" { ami = data.aws_ami.debian.id instance_type = var.ec2_instance.instance_type @@ -31,17 +31,22 @@ resource "aws_instance" "web" { associate_public_ip_address = true # security - vpc_security_group_ids = [aws_vpc.ec2.id] - subnet_id = aws_subnet.ec2_zone_a.id - 
security_groups = [aws_security_group.ec2_cluster.id] + vpc_security_group_ids = [aws_security_group.ec2_instance.id] + subnet_id = aws_subnet.ec2_zone_a[0].id + security_groups = [aws_security_group.ec2_instance.id] user_data = local.bootstrap - tags = { - Name = "HelloWorld" - } + tags = merge( + local.tags_common, + { + "Service" = "EC2" + } + ) } +resource "time_static" "on_ec2_create" {} + locals { bootstrap = < Date: Thu, 28 Apr 2022 18:17:45 +0200 Subject: [PATCH 087/122] fix: setup proper tf values for ec2 --- lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf index 318db00b..0044f5f0 100644 --- a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf +++ b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf @@ -1,6 +1,15 @@ locals { qovery_tf_config = < Date: Fri, 29 Apr 2022 13:50:53 +0200 Subject: [PATCH 088/122] Add long_id for applications (#702) --- .../templates/deployment.j2.yaml | 2 ++ .../templates/horizontal_autoscaler.j2.yaml | 1 + .../templates/networkpolicies.j2.yaml | 3 ++ .../q-application/templates/pdb.j2.yaml | 1 + .../q-application/templates/secret.j2.yaml | 1 + .../q-application/templates/service.j2.yaml | 1 + .../templates/statefulset.j2.yaml | 24 ++----------- .../templates/deployment.j2.yaml | 2 ++ .../templates/horizontal_autoscaler.j2.yaml | 1 + .../q-application/templates/pdb.j2.yaml | 1 + .../q-application/templates/secret.j2.yaml | 1 + .../q-application/templates/service.j2.yaml | 1 + .../templates/statefulset.j2.yaml | 24 ++----------- .../templates/deployment.j2.yaml | 2 ++ .../templates/horizontal_autoscaler.j2.yaml | 1 + .../templates/networkpolicies.j2.yaml | 2 ++ .../q-application/templates/pdb.j2.yaml | 1 + .../q-application/templates/secret.j2.yaml | 2 ++ .../q-application/templates/service.j2.yaml | 1 + .../templates/statefulset.j2.yaml | 24 ++----------- src/io_models.rs | 13 ++++--- 
src/models/application.rs | 12 ++++--- src/utilities.rs | 5 +++ test_utilities/src/common.rs | 35 +++++++++++-------- tests/aws/aws_environment.rs | 19 ++++++++-- tests/digitalocean/do_environment.rs | 6 +++- tests/scaleway/scw_environment.rs | 8 +++-- 27 files changed, 98 insertions(+), 96 deletions(-) diff --git a/lib/aws/charts/q-application/templates/deployment.j2.yaml b/lib/aws/charts/q-application/templates/deployment.j2.yaml index b942e99f..a292b299 100644 --- a/lib/aws/charts/q-application/templates/deployment.j2.yaml +++ b/lib/aws/charts/q-application/templates/deployment.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -29,6 +30,7 @@ spec: template: metadata: labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/aws/charts/q-application/templates/horizontal_autoscaler.j2.yaml b/lib/aws/charts/q-application/templates/horizontal_autoscaler.j2.yaml index d14331e7..e3424d15 100644 --- a/lib/aws/charts/q-application/templates/horizontal_autoscaler.j2.yaml +++ b/lib/aws/charts/q-application/templates/horizontal_autoscaler.j2.yaml @@ -7,6 +7,7 @@ metadata: labels: envId: {{ environment_id }} appId: {{ id }} + appLongId: {{ long_id }} spec: scaleTargetRef: apiVersion: apps/v1 diff --git a/lib/aws/charts/q-application/templates/networkpolicies.j2.yaml b/lib/aws/charts/q-application/templates/networkpolicies.j2.yaml index 14ab70f4..09c4abeb 100644 --- a/lib/aws/charts/q-application/templates/networkpolicies.j2.yaml +++ b/lib/aws/charts/q-application/templates/networkpolicies.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }}-default namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} @@ -26,6 +27,7 @@ metadata: name: {{ sanitized_name }}-app-access namespace: {{ namespace 
}} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} @@ -76,6 +78,7 @@ metadata: name: {{ sanitized_name }}-deny-aws-metadata-server namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} diff --git a/lib/aws/charts/q-application/templates/pdb.j2.yaml b/lib/aws/charts/q-application/templates/pdb.j2.yaml index 4e8b8015..22be8da3 100644 --- a/lib/aws/charts/q-application/templates/pdb.j2.yaml +++ b/lib/aws/charts/q-application/templates/pdb.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/aws/charts/q-application/templates/secret.j2.yaml b/lib/aws/charts/q-application/templates/secret.j2.yaml index 86625f6e..df3accd0 100644 --- a/lib/aws/charts/q-application/templates/secret.j2.yaml +++ b/lib/aws/charts/q-application/templates/secret.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/aws/charts/q-application/templates/service.j2.yaml b/lib/aws/charts/q-application/templates/service.j2.yaml index bb258f90..588b0a97 100644 --- a/lib/aws/charts/q-application/templates/service.j2.yaml +++ b/lib/aws/charts/q-application/templates/service.j2.yaml @@ -9,6 +9,7 @@ metadata: appId: {{ id }} app: {{ sanitized_name }} envId: {{ environment_id }} + appLongId: {{ long_id }} spec: type: ClusterIP ports: diff --git a/lib/aws/charts/q-application/templates/statefulset.j2.yaml b/lib/aws/charts/q-application/templates/statefulset.j2.yaml index fb7cf72b..867a9f92 100644 --- a/lib/aws/charts/q-application/templates/statefulset.j2.yaml +++ b/lib/aws/charts/q-application/templates/statefulset.j2.yaml @@ -9,6 +9,7 @@ metadata: ownerId: {{ owner_id }} envId: 
{{ environment_id }} appId: {{ id }} + appLongId: {{ long_id }} app: {{ sanitized_name }} annotations: releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %} @@ -27,6 +28,7 @@ spec: ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} + appLongId: {{ long_id }} app: {{ sanitized_name }} annotations: checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %} @@ -91,27 +93,6 @@ spec: {%- endfor %} volumeClaimTemplates: {%- for s in storage %} -{% if clone %} - - metadata: - name: {{ s.id }} - labels: - ownerId: {{ owner_id }} - envId: {{ environment_id }} - appId: {{ id }} - app: {{ sanitized_name }} - diskId: {{ s.id }} - diskType: {{ s.storage_type }} - spec: - accessModes: - - ReadWriteOnce - storageClassName: aws-ebs-{{ s.storage_type }}-0 - dataSource: - name: {{ s.id }} - kind: PersistentVolumeClaim - resources: - requests: - storage: {{ disk.size_in_gib }}Gi -{% else %} - metadata: name: {{ s.id }} labels: @@ -127,6 +108,5 @@ spec: resources: requests: storage: {{ s.size_in_gib }}Gi -{%- endif %} {%- endfor %} {%- endif %} diff --git a/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml b/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml index b942e99f..a292b299 100644 --- a/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/deployment.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -29,6 +30,7 @@ spec: template: metadata: labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/digitalocean/charts/q-application/templates/horizontal_autoscaler.j2.yaml b/lib/digitalocean/charts/q-application/templates/horizontal_autoscaler.j2.yaml index d14331e7..04f72ef6 
100644 --- a/lib/digitalocean/charts/q-application/templates/horizontal_autoscaler.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/horizontal_autoscaler.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} envId: {{ environment_id }} appId: {{ id }} spec: diff --git a/lib/digitalocean/charts/q-application/templates/pdb.j2.yaml b/lib/digitalocean/charts/q-application/templates/pdb.j2.yaml index 4e8b8015..22be8da3 100644 --- a/lib/digitalocean/charts/q-application/templates/pdb.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/pdb.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/digitalocean/charts/q-application/templates/secret.j2.yaml b/lib/digitalocean/charts/q-application/templates/secret.j2.yaml index 86625f6e..df3accd0 100644 --- a/lib/digitalocean/charts/q-application/templates/secret.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/secret.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/digitalocean/charts/q-application/templates/service.j2.yaml b/lib/digitalocean/charts/q-application/templates/service.j2.yaml index bb258f90..6f4bc396 100644 --- a/lib/digitalocean/charts/q-application/templates/service.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/service.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} diff --git a/lib/digitalocean/charts/q-application/templates/statefulset.j2.yaml b/lib/digitalocean/charts/q-application/templates/statefulset.j2.yaml index 63dae842..66e34216 100644 --- 
a/lib/digitalocean/charts/q-application/templates/statefulset.j2.yaml +++ b/lib/digitalocean/charts/q-application/templates/statefulset.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -24,6 +25,7 @@ spec: template: metadata: labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -91,27 +93,6 @@ spec: {%- endfor %} volumeClaimTemplates: {%- for s in storage %} -{% if clone %} - - metadata: - name: {{ s.id }} - labels: - ownerId: {{ owner_id }} - envId: {{ environment_id }} - appId: {{ id }} - app: {{ sanitized_name }} - diskId: {{ s.id }} - diskType: {{ s.storage_type }} - spec: - accessModes: - - ReadWriteOnce - storageClassName: {{ s.storage_type }} - dataSource: - name: {{ s.id }} - kind: PersistentVolumeClaim - resources: - requests: - storage: {{ disk.size_in_gib }}Gi -{% else %} - metadata: name: {{ s.id }} labels: @@ -127,6 +108,5 @@ spec: resources: requests: storage: {{ s.size_in_gib }}Gi -{%- endif %} {%- endfor %} {%- endif %} diff --git a/lib/scaleway/charts/q-application/templates/deployment.j2.yaml b/lib/scaleway/charts/q-application/templates/deployment.j2.yaml index 9d407b87..7c5d7531 100644 --- a/lib/scaleway/charts/q-application/templates/deployment.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/deployment.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -28,6 +29,7 @@ spec: template: metadata: labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/scaleway/charts/q-application/templates/horizontal_autoscaler.j2.yaml b/lib/scaleway/charts/q-application/templates/horizontal_autoscaler.j2.yaml index d14331e7..04f72ef6 100644 --- 
a/lib/scaleway/charts/q-application/templates/horizontal_autoscaler.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/horizontal_autoscaler.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} envId: {{ environment_id }} appId: {{ id }} spec: diff --git a/lib/scaleway/charts/q-application/templates/networkpolicies.j2.yaml b/lib/scaleway/charts/q-application/templates/networkpolicies.j2.yaml index 01e5a43f..4949144d 100644 --- a/lib/scaleway/charts/q-application/templates/networkpolicies.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/networkpolicies.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }}-default namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} @@ -26,6 +27,7 @@ metadata: name: {{ sanitized_name }}-app-access namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} diff --git a/lib/scaleway/charts/q-application/templates/pdb.j2.yaml b/lib/scaleway/charts/q-application/templates/pdb.j2.yaml index 4e8b8015..22be8da3 100644 --- a/lib/scaleway/charts/q-application/templates/pdb.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/pdb.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} diff --git a/lib/scaleway/charts/q-application/templates/secret.j2.yaml b/lib/scaleway/charts/q-application/templates/secret.j2.yaml index 1e78760f..d41566d0 100644 --- a/lib/scaleway/charts/q-application/templates/secret.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/secret.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -23,6 +24,7 @@ 
metadata: name: {{ registry_secret_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} envId: {{ environment_id }} appId: {{ id }} app: {{ sanitized_name }} diff --git a/lib/scaleway/charts/q-application/templates/service.j2.yaml b/lib/scaleway/charts/q-application/templates/service.j2.yaml index 22ffc5ba..2bd35e24 100644 --- a/lib/scaleway/charts/q-application/templates/service.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/service.j2.yaml @@ -5,6 +5,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} appId: {{ id }} app: {{ sanitized_name }} diff --git a/lib/scaleway/charts/q-application/templates/statefulset.j2.yaml b/lib/scaleway/charts/q-application/templates/statefulset.j2.yaml index 0af89e5c..1e7160d8 100644 --- a/lib/scaleway/charts/q-application/templates/statefulset.j2.yaml +++ b/lib/scaleway/charts/q-application/templates/statefulset.j2.yaml @@ -6,6 +6,7 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -24,6 +25,7 @@ spec: template: metadata: labels: + appLongId: {{ long_id }} ownerId: {{ owner_id }} envId: {{ environment_id }} appId: {{ id }} @@ -91,27 +93,6 @@ spec: {%- endfor %} volumeClaimTemplates: {%- for s in storage %} -{% if clone %} - - metadata: - name: {{ s.id }} - labels: - ownerId: {{ owner_id }} - envId: {{ environment_id }} - appId: {{ id }} - app: {{ sanitized_name }} - diskId: {{ s.id }} - diskType: {{ s.storage_type }} - spec: - accessModes: - - ReadWriteOnce - storageClassName: {{ s.storage_type }} - dataSource: - name: {{ s.id }} - kind: PersistentVolumeClaim - resources: - requests: - storage: {{ disk.size_in_gib }}Gi -{% else %} - metadata: name: {{ s.id }} labels: @@ -127,6 +108,5 @@ spec: resources: requests: storage: {{ s.size_in_gib }}Gi -{%- endif %} {%- endfor %} {%- endif %} diff --git 
a/src/io_models.rs b/src/io_models.rs index 5c57e8cd..8985fe90 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -12,6 +12,7 @@ use rand::distributions::Alphanumeric; use rand::Rng; use serde::{Deserialize, Serialize}; use url::Url; +use uuid::Uuid; use crate::build_platform::{Build, Credentials, GitRepository, Image, SshKey}; use crate::cloud_provider::environment::Environment; @@ -29,6 +30,7 @@ use crate::models::digital_ocean::{DoAppExtraSettings, DoRouterExtraSettings, Do use crate::models::router::RouterError; use crate::models::scaleway::{ScwAppExtraSettings, ScwRouterExtraSettings, ScwStorageType}; use crate::models::types::{CloudProvider as CP, VersionsNumber, AWS, DO, SCW}; +use crate::utilities::to_short_id; #[derive(Clone, Debug, PartialEq)] pub struct QoveryIdentifier { @@ -201,7 +203,7 @@ impl Default for ApplicationAdvanceSettings { #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct Application { - pub id: String, + pub long_id: Uuid, pub name: String, pub action: Action, pub git_url: String, @@ -240,7 +242,7 @@ impl Application { match cloud_provider.kind() { CPKind::Aws => Ok(Box::new(models::application::Application::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), self.ports.clone(), @@ -259,7 +261,7 @@ impl Application { )?)), CPKind::Do => Ok(Box::new(models::application::Application::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), self.ports.clone(), @@ -278,7 +280,7 @@ impl Application { )?)), CPKind::Scw => Ok(Box::new(models::application::Application::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), self.ports.clone(), @@ -300,7 +302,7 @@ impl Application { fn to_image(&self, cr_info: &ContainerRegistryInfo) -> Image { Image { - application_id: self.id.clone(), + application_id: to_short_id(&self.long_id), name: 
(cr_info.get_image_name)(&self.name), tag: "".to_string(), // It needs to be compute after creation commit_id: self.commit_id.clone(), @@ -439,6 +441,7 @@ pub struct GitCredentials { #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct Storage { pub id: String, + pub long_id: Uuid, pub name: String, pub storage_type: StorageType, pub size_in_gib: u16, diff --git a/src/models/application.rs b/src/models/application.rs index 71fd38e2..4fe82864 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -15,9 +15,11 @@ use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmi use crate::io_models::{ApplicationAdvanceSettings, Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; use crate::models::types::{CloudProvider, ToTeraContext}; +use crate::utilities::to_short_id; use function_name::named; use std::marker::PhantomData; use tera::Context as TeraContext; +use uuid::Uuid; #[derive(thiserror::Error, Debug)] pub enum ApplicationError { @@ -29,6 +31,7 @@ pub struct Application { _marker: PhantomData, pub(super) context: Context, pub(super) id: String, + pub(super) long_id: Uuid, pub(super) action: Action, pub(super) name: String, pub(super) ports: Vec, @@ -50,7 +53,7 @@ pub struct Application { impl Application { pub fn new( context: Context, - id: &str, + long_id: Uuid, action: Action, name: &str, ports: Vec, @@ -72,7 +75,8 @@ impl Application { Ok(Self { _marker: PhantomData, context, - id: id.to_string(), + id: to_short_id(&long_id), + long_id, action, name: name.to_string(), ports, @@ -94,6 +98,7 @@ impl Application { pub(super) fn default_tera_context(&self, kubernetes: &dyn Kubernetes, environment: &Environment) -> TeraContext { let mut context = TeraContext::new(); context.insert("id", self.id()); + context.insert("long_id", &self.long_id); context.insert("owner_id", environment.owner_id.as_str()); context.insert("project_id", environment.project_id.as_str()); 
context.insert("organization_id", environment.organization_id.as_str()); @@ -140,9 +145,6 @@ impl Application { context.insert("is_registry_secret", &true); context.insert("registry_secret", self.build().image.registry_host()); - // TODO: Remove this - context.insert("clone", &false); - if self.context.resource_expiration_in_seconds().is_some() { context.insert("resource_expiration_in_seconds", &self.context.resource_expiration_in_seconds()) } diff --git a/src/utilities.rs b/src/utilities.rs index d846d63c..f373edd6 100644 --- a/src/utilities.rs +++ b/src/utilities.rs @@ -5,6 +5,7 @@ use std::path::Path; use reqwest::header; use reqwest::header::{HeaderMap, HeaderValue}; +use uuid::Uuid; // generate the right header for digital ocean with token pub fn get_header_with_bearer(token: &str) -> HeaderMap { @@ -48,6 +49,10 @@ pub fn compute_image_tag + Hash, T: AsRef + Hash>( tag } +pub fn to_short_id(id: &Uuid) -> String { + format!("z{}", id.to_string().split_at(8).0) +} + #[cfg(test)] mod tests_utilities { use crate::utilities::compute_image_tag; diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index c000f998..fa05703b 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -39,12 +39,14 @@ use qovery_engine::logger::Logger; use qovery_engine::models::digital_ocean::DoRegion; use qovery_engine::models::scaleway::ScwZone; use qovery_engine::transaction::{DeploymentOption, Transaction, TransactionResult}; +use qovery_engine::utilities::to_short_id; use std::collections::BTreeMap; use std::path::Path; use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use tracing::{span, Level}; +use uuid::Uuid; pub enum RegionActivationStatus { Deactivated, @@ -255,7 +257,7 @@ pub fn environment_3_apps_3_routers_3_databases( action: Action::Create, applications: vec![ Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: app_name_1.clone(), git_url: "https://github.com/Qovery/engine-testing.git".to_string(), 
commit_id: "5990752647af11ef21c3d46a51abbde3da1ab351".to_string(), @@ -270,6 +272,7 @@ pub fn environment_3_apps_3_routers_3_databases( }), storage: vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: 10, @@ -301,7 +304,7 @@ pub fn environment_3_apps_3_routers_3_databases( advance_settings: Default::default(), }, Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: app_name_2.clone(), git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "5990752647af11ef21c3d46a51abbde3da1ab351".to_string(), @@ -316,6 +319,7 @@ pub fn environment_3_apps_3_routers_3_databases( }), storage: vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: 10, @@ -347,7 +351,7 @@ pub fn environment_3_apps_3_routers_3_databases( advance_settings: Default::default(), }, Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: app_name_3.clone(), git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "158ea8ebc9897c50a7c56b910db33ce837ac1e61".to_string(), @@ -362,6 +366,7 @@ pub fn environment_3_apps_3_routers_3_databases( }), storage: vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: 10, @@ -523,7 +528,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi organization_id: context.organization_id().to_string(), action: Action::Create, applications: vec![Application { - id: application_id, + long_id: Uuid::new_v4(), name: application_name, git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "fc575a2f3be0b9100492c8a463bf18134a8698a5".to_string(), @@ -575,7 +580,6 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi pub fn database_test_environment(context: &Context) -> EnvironmentRequest { let 
suffix = generate_id(); - let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); EnvironmentRequest { @@ -586,7 +590,7 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { organization_id: context.organization_id().to_string(), action: Action::Create, applications: vec![Application { - id: application_id, + long_id: Uuid::new_v4(), name: application_name, git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "fc575a2f3be0b9100492c8a463bf18134a8698a5".to_string(), @@ -678,7 +682,7 @@ pub fn environnement_2_app_2_routers_1_psql( }], applications: vec![ Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: application_name1.to_string(), git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "680550d1937b3f90551849c0da8f77c39916913b".to_string(), @@ -693,6 +697,7 @@ pub fn environnement_2_app_2_routers_1_psql( }), storage: vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: 10, @@ -724,7 +729,7 @@ pub fn environnement_2_app_2_routers_1_psql( advance_settings: Default::default(), }, Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: application_name2.to_string(), git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "680550d1937b3f90551849c0da8f77c39916913b".to_string(), @@ -739,6 +744,7 @@ pub fn environnement_2_app_2_routers_1_psql( }), storage: vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: 10, @@ -831,7 +837,7 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment organization_id: context.organization_id().to_string(), action: Action::Create, applications: vec![Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: format!("{}-{}", "echo-app".to_string(), &suffix), 
/*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), @@ -894,7 +900,7 @@ pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { organization_id: context.organization_id().to_string(), action: Action::Create, applications: vec![Application { - id: generate_id(), + long_id: Uuid::new_v4(), name: format!("{}-{}", "mini-http".to_string(), &suffix), /*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), @@ -935,6 +941,7 @@ pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { pub fn environment_only_http_server_router(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); + let id = Uuid::new_v4(); EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), @@ -943,7 +950,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) organization_id: context.organization_id().to_string(), action: Action::Create, applications: vec![Application { - id: generate_id(), + long_id: id, name: format!("{}-{}", "mini-http".to_string(), &suffix), /*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), @@ -1045,7 +1052,7 @@ pub fn test_db( let _enter = span.enter(); let context_for_delete = context.clone_not_same_execution_id(); - let app_id = generate_id(); + let app_id = Uuid::new_v4(); let database_username = "superuser".to_string(); let database_password = generate_password(provider_kind.clone(), database_mode.clone()); let db_kind_str = db_kind.name().to_string(); @@ -1107,8 +1114,8 @@ pub fn test_db( .applications .into_iter() .map(|mut app| { - app.id = app_id.clone(); - app.name = app_id.clone(); + app.long_id = app_id.clone(); + app.name = to_short_id(&app_id); app.branch = app_name.clone(); app.commit_id = db_infos.app_commit.clone(); app.ports = vec![Port { diff --git 
a/tests/aws/aws_environment.rs b/tests/aws/aws_environment.rs index ac3b3dc0..07426987 100644 --- a/tests/aws/aws_environment.rs +++ b/tests/aws/aws_environment.rs @@ -9,12 +9,14 @@ use qovery_engine::cloud_provider::Kind; use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs; use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; +use qovery_engine::utilities::to_short_id; use std::collections::BTreeMap; use std::thread; use std::time::Duration; use test_utilities::aws::aws_default_engine_config; use test_utilities::utilities::{context, init, kubernetes_config_path}; use tracing::{span, Level}; +use uuid::Uuid; #[cfg(feature = "test-aws-minimal")] #[named] @@ -159,7 +161,7 @@ fn deploy_a_working_environment_and_pause_it_eks() { ); let ea = environment.clone(); - let selector = format!("appId={}", environment.applications[0].id); + let selector = format!("appId={}", to_short_id(&environment.applications[0].long_id)); let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -220,7 +222,12 @@ fn deploy_a_working_environment_and_pause_it_eks() { None, ); for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") { - assert_eq!(pdb.metadata.name.contains(&environment.applications[0].id), false) + assert_eq!( + pdb.metadata + .name + .contains(&to_short_id(&environment.applications[0].long_id)), + false + ) } // Check we can resume the env @@ -265,7 +272,11 @@ fn deploy_a_working_environment_and_pause_it_eks() { ); let mut filtered_pdb = false; for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") { - if pdb.metadata.name.contains(&environment.applications[0].id) { + if pdb + .metadata + .name + .contains(&to_short_id(&environment.applications[0].long_id)) + { filtered_pdb = true; break; } @@ -575,6 +586,7 @@ fn 
deploy_a_working_environment_with_storage_on_aws_eks() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, @@ -657,6 +669,7 @@ fn redeploy_same_app_with_ebs() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, diff --git a/tests/digitalocean/do_environment.rs b/tests/digitalocean/do_environment.rs index 756422a6..a8a3cfc3 100644 --- a/tests/digitalocean/do_environment.rs +++ b/tests/digitalocean/do_environment.rs @@ -9,6 +9,7 @@ use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; +use qovery_engine::utilities::to_short_id; use std::collections::BTreeMap; use std::thread; use std::time::Duration; @@ -16,6 +17,7 @@ use test_utilities::common::Infrastructure; use test_utilities::digitalocean::do_default_engine_config; use test_utilities::utilities::context; use tracing::{span, warn, Level}; +use uuid::Uuid; // Note: All those tests relies on a test cluster running on DigitalOcean infrastructure. // This cluster should be live in order to have those tests passing properly. 
@@ -222,7 +224,7 @@ fn digitalocean_doks_deploy_a_working_environment_and_pause() { ); let env_action = environment.clone(); - let selector = format!("appId={}", environment.applications[0].id); + let selector = format!("appId={}", to_short_id(&environment.applications[0].long_id)); let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); @@ -451,6 +453,7 @@ fn digitalocean_doks_deploy_a_working_environment_with_storage() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, @@ -534,6 +537,7 @@ fn digitalocean_doks_redeploy_same_app() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, diff --git a/tests/scaleway/scw_environment.rs b/tests/scaleway/scw_environment.rs index 2fb6cdf1..4bfdc8fb 100644 --- a/tests/scaleway/scw_environment.rs +++ b/tests/scaleway/scw_environment.rs @@ -9,12 +9,14 @@ use ::function_name::named; use qovery_engine::cloud_provider::Kind; use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType}; use qovery_engine::transaction::TransactionResult; +use qovery_engine::utilities::to_short_id; use std::collections::BTreeMap; use std::thread; use std::time::Duration; use test_utilities::common::Infrastructure; use test_utilities::scaleway::scw_default_engine_config; use tracing::{span, warn, Level}; +use uuid::Uuid; // Note: All those tests relies on a test cluster running on Scaleway infrastructure. // This cluster should be live in order to have those tests passing properly. 
@@ -230,7 +232,7 @@ fn scaleway_kapsule_deploy_a_working_environment_and_pause() { ); let env_action = environment.clone(); - let selector = format!("appId={}", environment.applications[0].id); + let selector = format!("appId={}", to_short_id(&environment.applications[0].long_id)); let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -466,6 +468,7 @@ fn scaleway_kapsule_deploy_a_working_environment_with_storage() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, @@ -541,7 +544,7 @@ fn deploy_a_working_environment_and_pause_it() { ); let ea = environment.clone(); - let selector = format!("appId={}", environment.applications[0].id); + let selector = format!("appId={}", to_short_id(&environment.applications[0].long_id)); let result = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(result, TransactionResult::Ok)); @@ -634,6 +637,7 @@ fn scaleway_kapsule_redeploy_same_app() { .map(|mut app| { app.storage = vec![Storage { id: generate_id(), + long_id: Uuid::new_v4(), name: "photos".to_string(), storage_type: StorageType::Ssd, size_in_gib: storage_size, From 3444f325663f3efaa6f7a218e64f929abd285057 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Fri, 29 Apr 2022 15:37:40 +0200 Subject: [PATCH 089/122] Add long_id for databases (#703) * Add long_id for databases * Add long_id for router * Fix linter --- lib/aws/chart_values/mongodb/q-values.j2.yaml | 8 ++-- lib/aws/chart_values/mysql/q-values.j2.yaml | 6 +-- .../chart_values/postgresql/q-values.j2.yaml | 7 ++- lib/aws/chart_values/redis/q-values.j2.yaml | 9 ++-- .../templates/cert-issuer.j2.yaml | 2 +- .../templates/ingress-qovery.j2.yaml | 3 +- .../chart_values/mongodb/q-values.j2.yaml | 8 ++-- .../chart_values/mysql/q-values.j2.yaml 
| 6 +-- .../chart_values/postgresql/q-values.j2.yaml | 7 ++- .../chart_values/redis/q-values.j2.yaml | 9 ++-- .../templates/cert-issuer.j2.yaml | 2 +- .../templates/ingress-qovery.j2.yaml | 3 +- .../chart_values/mongodb/q-values.j2.yaml | 8 ++-- .../chart_values/mysql/q-values.j2.yaml | 6 +-- .../chart_values/postgresql/q-values.j2.yaml | 7 ++- .../chart_values/redis/q-values.j2.yaml | 9 ++-- .../templates/cert-issuer.j2.yaml | 2 +- .../templates/ingress-qovery.j2.yaml | 3 +- src/cloud_provider/service.rs | 3 ++ src/io_models.rs | 46 +++++++++---------- src/models/application.rs | 4 ++ src/models/aws/database.rs | 1 + src/models/database.rs | 13 +++++- src/models/router.rs | 12 ++++- src/models/scaleway/database.rs | 1 + test_utilities/src/common.rs | 27 ++++++----- test_utilities/src/utilities.rs | 3 +- tests/aws/aws_databases.rs | 6 ++- tests/digitalocean/do_databases.rs | 6 ++- tests/scaleway/scw_databases.rs | 6 ++- 30 files changed, 122 insertions(+), 111 deletions(-) diff --git a/lib/aws/chart_values/mongodb/q-values.j2.yaml b/lib/aws/chart_values/mongodb/q-values.j2.yaml index 64a34332..461ed841 100644 --- a/lib/aws/chart_values/mongodb/q-values.j2.yaml +++ b/lib/aws/chart_values/mongodb/q-values.j2.yaml @@ -207,19 +207,18 @@ annotations: {} # Additional abels to apply to the deployment or statefulsets labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} # Annotations to be added to MongoDB pods podAnnotations: {} # Additional pod labels to apply podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} ## Use an alternate scheduler, e.g. "stork". 
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ @@ -345,10 +344,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} ## Configure the ingress resource that allows you to access the ## MongoDB installation. Set up the URL diff --git a/lib/aws/chart_values/mysql/q-values.j2.yaml b/lib/aws/chart_values/mysql/q-values.j2.yaml index 51cb92c3..15e57bad 100644 --- a/lib/aws/chart_values/mysql/q-values.j2.yaml +++ b/lib/aws/chart_values/mysql/q-values.j2.yaml @@ -44,10 +44,9 @@ fullnameOverride: {{ sanitized_name }} clusterDomain: cluster.local commonLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. 
@@ -315,10 +314,9 @@ master: ## PVC annotations ## annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} ## Persistent Volume Access Mode ## diff --git a/lib/aws/chart_values/postgresql/q-values.j2.yaml b/lib/aws/chart_values/postgresql/q-values.j2.yaml index 348a611c..be1475e2 100644 --- a/lib/aws/chart_values/postgresql/q-values.j2.yaml +++ b/lib/aws/chart_values/postgresql/q-values.j2.yaml @@ -326,10 +326,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} ## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies @@ -349,14 +348,14 @@ master: affinity: {} tolerations: [] labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} annotations: {} podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} podAnnotations: {} priorityClassName: "" ## Extra init containers diff --git a/lib/aws/chart_values/redis/q-values.j2.yaml b/lib/aws/chart_values/redis/q-values.j2.yaml index 69d7d354..4de95952 100644 --- a/lib/aws/chart_values/redis/q-values.j2.yaml +++ b/lib/aws/chart_values/redis/q-values.j2.yaml @@ -132,9 +132,9 @@ sentinel: ## annotations: {} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: ## Specifies the Kubernetes Cluster's Domain Name. 
@@ -291,10 +291,9 @@ master: ## Redis Master additional pod labels and annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: {{ sanitized_name }} + databaseLongId: {{ long_id }} podAnnotations: {} ## Redis Master resource requests and limits @@ -388,9 +387,9 @@ master: external-dns.alpha.kubernetes.io/ttl: "300" {% endif %} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: # loadBalancerSourceRanges: ["10.0.0.0/8"] @@ -425,9 +424,9 @@ master: ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets statefulset: labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} updateStrategy: RollingUpdate ## Partition update strategy ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions diff --git a/lib/aws/charts/q-ingress-tls/templates/cert-issuer.j2.yaml b/lib/aws/charts/q-ingress-tls/templates/cert-issuer.j2.yaml index cfb54db6..5a4d0ae4 100644 --- a/lib/aws/charts/q-ingress-tls/templates/cert-issuer.j2.yaml +++ b/lib/aws/charts/q-ingress-tls/templates/cert-issuer.j2.yaml @@ -6,7 +6,7 @@ metadata: name: {{ id }} namespace: {{ namespace }} labels: - ownerId: {{ owner_id }} + routerLongId: {{ long_id }} spec: acme: server: {{ spec_acme_server }} diff --git a/lib/aws/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml b/lib/aws/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml index 5cf11a24..8e4c688e 100644 --- a/lib/aws/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml +++ b/lib/aws/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml @@ -6,9 +6,8 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: - ownerId: {{ owner_id }} - routerName: {{ sanitized_name }} routerId: {{ id }} + 
routerLongId: {{ long_id }} envId: {{ environment_id }} fqdn: "{{ router_default_domain }}" annotations: diff --git a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml index 46506ac1..273f0404 100644 --- a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml @@ -210,19 +210,18 @@ annotations: {} # Additional abels to apply to the deployment or statefulsets labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} # Annotations to be added to MongoDB pods podAnnotations: {} # Additional pod labels to apply podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ @@ -348,10 +347,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## Configure the ingress resource that allows you to access the ## MongoDB installation. 
Set up the URL diff --git a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml index 5e288642..4a68de80 100644 --- a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml @@ -44,10 +44,9 @@ fullnameOverride: '{{ sanitized_name }}' clusterDomain: cluster.local commonLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. @@ -315,10 +314,9 @@ master: ## PVC annotations ## annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## Persistent Volume Access Mode ## diff --git a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml index a3757916..ad87ddcc 100644 --- a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml @@ -328,10 +328,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies @@ -351,14 +350,14 @@ master: affinity: {} tolerations: [] labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} annotations: {} podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} podAnnotations: {} 
priorityClassName: "" ## Extra init containers diff --git a/lib/digitalocean/chart_values/redis/q-values.j2.yaml b/lib/digitalocean/chart_values/redis/q-values.j2.yaml index e1db86e7..2f44304e 100644 --- a/lib/digitalocean/chart_values/redis/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/redis/q-values.j2.yaml @@ -132,9 +132,9 @@ sentinel: ## annotations: {} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: ## Specifies the Kubernetes Cluster's Domain Name. @@ -291,10 +291,9 @@ master: ## Redis Master additional pod labels and annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} podAnnotations: {} ## Redis Master resource requests and limits @@ -391,9 +390,9 @@ master: external-dns.alpha.kubernetes.io/ttl: "300" {% endif %} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: # loadBalancerSourceRanges: ["10.0.0.0/8"] @@ -428,9 +427,9 @@ master: ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets statefulset: labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} updateStrategy: RollingUpdate ## Partition update strategy ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions diff --git a/lib/digitalocean/charts/q-ingress-tls/templates/cert-issuer.j2.yaml b/lib/digitalocean/charts/q-ingress-tls/templates/cert-issuer.j2.yaml index 96623d12..23b35f81 100644 --- a/lib/digitalocean/charts/q-ingress-tls/templates/cert-issuer.j2.yaml +++ b/lib/digitalocean/charts/q-ingress-tls/templates/cert-issuer.j2.yaml @@ -6,7 +6,7 @@ metadata: name: {{ id }} namespace: {{ namespace }} 
labels: - ownerId: {{ owner_id }} + routerLongId: {{ long_id }} spec: acme: server: {{ spec_acme_server }} diff --git a/lib/digitalocean/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml b/lib/digitalocean/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml index 5cf11a24..8e4c688e 100644 --- a/lib/digitalocean/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml +++ b/lib/digitalocean/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml @@ -6,9 +6,8 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: - ownerId: {{ owner_id }} - routerName: {{ sanitized_name }} routerId: {{ id }} + routerLongId: {{ long_id }} envId: {{ environment_id }} fqdn: "{{ router_default_domain }}" annotations: diff --git a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml index 12baf9b3..e1a609f6 100644 --- a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml @@ -145,17 +145,16 @@ replicaSet: # Additional abels to apply to the deployment or statefulsets labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} # Additional pod labels to apply podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} ## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies @@ -197,10 +196,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} # volume.beta.kubernetes.io/storage-class=scw-sbv-ssd-0: ## Prometheus Exporter / Metrics diff --git a/lib/scaleway/chart_values/mysql/q-values.j2.yaml 
b/lib/scaleway/chart_values/mysql/q-values.j2.yaml index 614cb3d2..7d66a8f4 100644 --- a/lib/scaleway/chart_values/mysql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mysql/q-values.j2.yaml @@ -44,10 +44,9 @@ fullnameOverride: '{{ sanitized_name }}' clusterDomain: cluster.local commonLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. @@ -293,10 +292,9 @@ master: ## PVC annotations ## annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## Persistent Volume Access Mode ## diff --git a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml index 81a64782..b1a83685 100644 --- a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml @@ -169,10 +169,9 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} ## ## PostgreSQL Master parameters @@ -187,14 +186,14 @@ master: affinity: {} tolerations: [] labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} annotations: {} podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} podAnnotations: {} priorityClassName: "" ## Extra init containers diff --git a/lib/scaleway/chart_values/redis/q-values.j2.yaml b/lib/scaleway/chart_values/redis/q-values.j2.yaml index 80619204..f1b92cb8 100644 --- a/lib/scaleway/chart_values/redis/q-values.j2.yaml 
+++ b/lib/scaleway/chart_values/redis/q-values.j2.yaml @@ -121,9 +121,9 @@ sentinel: ## annotations: {} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: networkPolicy: @@ -248,10 +248,9 @@ master: ## Redis Master additional pod labels and annotations ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseName: '{{ sanitized_name }}' + databaseLongId: {{ long_id }} podAnnotations: {} ## Redis Master resource requests and limits @@ -350,9 +349,9 @@ master: external-dns.alpha.kubernetes.io/ttl: "300" {% endif %} labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} loadBalancerIP: # loadBalancerSourceRanges: ["10.0.0.0/8"] @@ -387,9 +386,9 @@ master: ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets statefulset: labels: - ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} + databaseLongId: {{ long_id }} updateStrategy: RollingUpdate ## Partition update strategy ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions diff --git a/lib/scaleway/charts/q-ingress-tls/templates/cert-issuer.j2.yaml b/lib/scaleway/charts/q-ingress-tls/templates/cert-issuer.j2.yaml index cfb54db6..5a4d0ae4 100644 --- a/lib/scaleway/charts/q-ingress-tls/templates/cert-issuer.j2.yaml +++ b/lib/scaleway/charts/q-ingress-tls/templates/cert-issuer.j2.yaml @@ -6,7 +6,7 @@ metadata: name: {{ id }} namespace: {{ namespace }} labels: - ownerId: {{ owner_id }} + routerLongId: {{ long_id }} spec: acme: server: {{ spec_acme_server }} diff --git a/lib/scaleway/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml b/lib/scaleway/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml index 5cf11a24..8e4c688e 100644 --- 
a/lib/scaleway/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml +++ b/lib/scaleway/charts/q-ingress-tls/templates/ingress-qovery.j2.yaml @@ -6,9 +6,8 @@ metadata: name: {{ sanitized_name }} namespace: {{ namespace }} labels: - ownerId: {{ owner_id }} - routerName: {{ sanitized_name }} routerId: {{ id }} + routerLongId: {{ long_id }} envId: {{ environment_id }} fqdn: "{{ router_default_domain }}" annotations: diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index 05ac86df..defbddf2 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -6,6 +6,7 @@ use std::thread; use std::time::Duration; use tera::Context as TeraContext; +use uuid::Uuid; use crate::cloud_provider::environment::Environment; use crate::cloud_provider::helm::ChartInfo; @@ -31,6 +32,7 @@ pub trait Service: ToTransmitter { fn context(&self) -> &Context; fn service_type(&self) -> ServiceType; fn id(&self) -> &str; + fn long_id(&self) -> &Uuid; fn name(&self) -> &str; fn sanitized_name(&self) -> String; fn name_with_id(&self) -> String { @@ -328,6 +330,7 @@ pub fn default_tera_context( ) -> TeraContext { let mut context = TeraContext::new(); context.insert("id", service.id()); + context.insert("long_id", service.id()); context.insert("owner_id", environment.owner_id.as_str()); context.insert("project_id", environment.project_id.as_str()); context.insert("organization_id", environment.organization_id.as_str()); diff --git a/src/io_models.rs b/src/io_models.rs index 8985fe90..06f6bb4a 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -500,7 +500,7 @@ impl Storage { #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct Router { - pub id: String, + pub long_id: Uuid, pub name: String, pub action: Action, pub default_domain: String, @@ -544,7 +544,7 @@ impl Router { CPKind::Aws => { let router = Box::new(models::router::Router::::new( context.clone(), - self.id.as_str(), + self.long_id, self.name.as_str(), 
self.action.to_service_action(), self.default_domain.as_str(), @@ -560,7 +560,7 @@ impl Router { CPKind::Do => { let router = Box::new(models::router::Router::::new( context.clone(), - self.id.as_str(), + self.long_id, self.name.as_str(), self.action.to_service_action(), self.default_domain.as_str(), @@ -576,7 +576,7 @@ impl Router { CPKind::Scw => { let router = Box::new(models::router::Router::::new( context.clone(), - self.id.as_str(), + self.long_id, self.name.as_str(), self.action.to_service_action(), self.default_domain.as_str(), @@ -615,7 +615,7 @@ pub enum DatabaseMode { pub struct Database { pub kind: DatabaseKind, pub action: Action, - pub id: String, + pub long_id: Uuid, pub name: String, pub version: String, pub fqdn_id: String, @@ -666,7 +666,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -687,7 +687,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -709,7 +709,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -730,7 +730,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -751,7 +751,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, 
self.action.to_service_action(), self.name.as_str(), version, @@ -772,7 +772,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -793,7 +793,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -814,7 +814,7 @@ impl Database { (CPKind::Aws, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -836,7 +836,7 @@ impl Database { (CPKind::Do, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -857,7 +857,7 @@ impl Database { (CPKind::Do, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -878,7 +878,7 @@ impl Database { (CPKind::Do, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -899,7 +899,7 @@ impl Database { (CPKind::Do, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -936,7 +936,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::MANAGED) => { let db = 
models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -957,7 +957,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Postgresql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -978,7 +978,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Mysql, DatabaseMode::MANAGED) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -999,7 +999,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Mysql, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -1020,7 +1020,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Redis, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, @@ -1041,7 +1041,7 @@ impl Database { (CPKind::Scw, DatabaseKind::Mongodb, DatabaseMode::CONTAINER) => { let db = models::database::Database::::new( context.clone(), - self.id.as_str(), + self.long_id, self.action.to_service_action(), self.name.as_str(), version, diff --git a/src/models/application.rs b/src/models/application.rs index 4fe82864..3a3c5218 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -337,6 +337,10 @@ where fn selector(&self) -> Option { self.selector() } + + fn long_id(&self) -> &Uuid { + &self.long_id + } } impl Helm for Application { diff --git a/src/models/aws/database.rs b/src/models/aws/database.rs index a5237851..71c90b11 100644 --- a/src/models/aws/database.rs +++ b/src/models/aws/database.rs @@ -176,6 +176,7 @@ where ); 
context.insert("namespace", environment.namespace()); + context.insert("long_id", &self.long_id); let version = self .get_version_aws_managed(event_details)? diff --git a/src/models/database.rs b/src/models/database.rs index 4555e335..8d937bbd 100644 --- a/src/models/database.rs +++ b/src/models/database.rs @@ -15,10 +15,12 @@ use crate::models::database_utils::{ get_self_hosted_redis_version, }; use crate::models::types::{CloudProvider, ToTeraContext, VersionsNumber}; +use crate::utilities::to_short_id; use function_name::named; use std::borrow::Borrow; use std::marker::PhantomData; use tera::Context as TeraContext; +use uuid::Uuid; ///////////////////////////////////////////////////////////////// // Database mode @@ -71,6 +73,7 @@ pub struct Database> { _marker: PhantomData<(C, M, T)>, pub(super) context: Context, pub(super) id: String, + pub(super) long_id: Uuid, pub(super) action: Action, pub(super) name: String, pub(super) version: VersionsNumber, @@ -89,7 +92,7 @@ pub struct Database> { impl> Database { pub fn new( context: Context, - id: &str, + long_id: Uuid, action: Action, name: &str, version: VersionsNumber, @@ -110,7 +113,8 @@ impl> Database _marker: PhantomData, context, action, - id: id.to_string(), + id: to_short_id(&long_id), + long_id, name: name.to_string(), version, fqdn: fqdn.to_string(), @@ -178,6 +182,10 @@ where &self.id } + fn long_id(&self) -> &Uuid { + &self.long_id + } + fn name(&self) -> &str { &self.name } @@ -479,6 +487,7 @@ where context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_cluster_name", kubernetes.name()); + context.insert("long_id", &self.long_id); context.insert("fqdn_id", self.fqdn_id.as_str()); context.insert("fqdn", self.fqdn(target, &self.fqdn, M::is_managed()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); diff --git a/src/models/router.rs b/src/models/router.rs index 73a9a44d..2ed5bf31 100644 --- a/src/models/router.rs +++ b/src/models/router.rs @@ -14,10 +14,12 @@ 
use crate::io_models::{Context, Listen, Listener, Listeners}; use crate::logger::Logger; use crate::models::types::CloudProvider; use crate::models::types::ToTeraContext; +use crate::utilities::to_short_id; use function_name::named; use std::borrow::Borrow; use std::marker::PhantomData; use tera::Context as TeraContext; +use uuid::Uuid; #[derive(thiserror::Error, Debug)] pub enum RouterError { @@ -29,6 +31,7 @@ pub struct Router { _marker: PhantomData, pub(crate) context: Context, pub(crate) id: String, + pub(crate) long_id: Uuid, pub(crate) action: Action, pub(crate) name: String, pub(crate) default_domain: String, @@ -43,7 +46,7 @@ pub struct Router { impl Router { pub fn new( context: Context, - id: &str, + long_id: Uuid, name: &str, action: Action, default_domain: &str, @@ -57,7 +60,8 @@ impl Router { Ok(Self { _marker: PhantomData, context, - id: id.to_string(), + id: to_short_id(&long_id), + long_id, name: name.to_string(), action, default_domain: default_domain.to_string(), @@ -243,6 +247,10 @@ where &self.id } + fn long_id(&self) -> &Uuid { + &self.long_id + } + fn name(&self) -> &str { &self.name } diff --git a/src/models/scaleway/database.rs b/src/models/scaleway/database.rs index 2649942a..30efe5a5 100644 --- a/src/models/scaleway/database.rs +++ b/src/models/scaleway/database.rs @@ -139,6 +139,7 @@ impl> Database { ); context.insert("namespace", environment.namespace()); + context.insert("long_id", &self.long_id); let version = get_version(event_details)?.matched_version(); context.insert("version_major", &version.to_major_version_string()); diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index fa05703b..a4c2275f 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -402,7 +402,7 @@ pub fn environment_3_apps_3_routers_3_databases( ], routers: vec![ Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), 
context.cluster_id().to_string(), test_domain), @@ -415,7 +415,7 @@ pub fn environment_3_apps_3_routers_3_databases( sticky_sessions_enabled: false, }, Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "second-router".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), @@ -428,7 +428,7 @@ pub fn environment_3_apps_3_routers_3_databases( sticky_sessions_enabled: false, }, Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "third-router".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), @@ -445,7 +445,7 @@ pub fn environment_3_apps_3_routers_3_databases( Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_name.clone(), version: "11.8.0".to_string(), fqdn_id: fqdn.clone(), @@ -467,7 +467,7 @@ pub fn environment_3_apps_3_routers_3_databases( Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_name_2.clone(), version: "11.8.0".to_string(), fqdn_id: fqdn_2.clone(), @@ -489,7 +489,7 @@ pub fn environment_3_apps_3_routers_3_databases( Database { kind: DatabaseKind::Mongodb, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_db_name_mongo.clone(), version: version_mongo.to_string(), fqdn_id: database_host_mongo.clone(), @@ -517,7 +517,6 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi let suffix = generate_id(); let application_id = generate_id(); let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); - let router_id = generate_id(); let router_name = "main".to_string(); let application_domain = format!("{}.{}.{}", application_id, context.cluster_id().to_string(), test_domain); EnvironmentRequest { @@ -561,7 +560,7 @@ pub fn 
working_minimal_environment(context: &Context, test_domain: &str) -> Envi advance_settings: Default::default(), }], routers: vec![Router { - id: router_id, + long_id: Uuid::new_v4(), name: router_name, action: Action::Create, default_domain: application_domain, @@ -661,7 +660,7 @@ pub fn environnement_2_app_2_routers_1_psql( databases: vec![Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_name.clone(), version: "11.8.0".to_string(), fqdn_id: fqdn.clone(), @@ -778,7 +777,7 @@ pub fn environnement_2_app_2_routers_1_psql( ], routers: vec![ Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), @@ -791,7 +790,7 @@ pub fn environnement_2_app_2_routers_1_psql( sticky_sessions_enabled: false, }, Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "second-router".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), @@ -873,7 +872,7 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment advance_settings: Default::default(), }], routers: vec![Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), @@ -984,7 +983,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) advance_settings: Default::default(), }], routers: vec![Router { - id: generate_id(), + long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), @@ -1087,7 +1086,7 @@ pub fn test_db( let db = Database { kind: db_kind.clone(), action: Action::Create, - id: db_id.clone(), + long_id: 
Uuid::new_v4(), name: db_id.clone(), version: version.to_string(), fqdn_id: database_host.clone(), diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 9ec311e6..1d16b621 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -53,6 +53,7 @@ use qovery_engine::io_models::DatabaseMode::MANAGED; use qovery_engine::logger::{Logger, StdIoLogger}; use qovery_engine::models::scaleway::ScwZone; use qovery_engine::runtime::block_on; +use qovery_engine::utilities::to_short_id; use time::Instant; use url::Url; @@ -897,7 +898,7 @@ pub fn db_fqnd(db: Database) -> String { match db.publicly_accessible { true => db.fqdn, false => match db.mode == MANAGED { - true => format!("{}-dns", db.id), + true => format!("{}-dns", to_short_id(&db.long_id)), false => match db.kind { DatabaseKind::Postgresql => "postgresqlpostgres", DatabaseKind::Mysql => "mysqlmysqldatabase", diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 72c3450a..30346270 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -5,6 +5,7 @@ use qovery_engine::cloud_provider::Kind; use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; use test_utilities::aws::aws_default_engine_config; use tracing::{span, Level}; +use uuid::Uuid; use self::test_utilities::aws::{AWS_DATABASE_DISK_TYPE, AWS_DATABASE_INSTANCE_TYPE}; use self::test_utilities::utilities::{ @@ -12,6 +13,7 @@ use self::test_utilities::utilities::{ }; use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; +use qovery_engine::utilities::to_short_id; use test_utilities::common::{test_db, Infrastructure}; /** @@ -261,7 +263,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { environment.databases = vec![Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: 
database_db_name.clone(), version: "11.8.0".to_string(), fqdn_id: database_host.clone(), @@ -323,7 +325,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let database_name = format!("postgresql{}-0", &environment_check.databases[0].id); + let database_name = format!("postgresql{}-0", to_short_id(&environment_check.databases[0].long_id)); match is_pod_restarted_env(context, Kind::Aws, environment_check, database_name.as_str(), secrets) { (true, _) => assert!(true), (false, _) => assert!(false), diff --git a/tests/digitalocean/do_databases.rs b/tests/digitalocean/do_databases.rs index e806c2c2..6feefd0a 100644 --- a/tests/digitalocean/do_databases.rs +++ b/tests/digitalocean/do_databases.rs @@ -1,5 +1,6 @@ use ::function_name::named; use tracing::{span, warn, Level}; +use uuid::Uuid; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; @@ -9,6 +10,7 @@ use test_utilities::utilities::{ }; use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::utilities::to_short_id; use test_utilities::common::{database_test_environment, test_db, Infrastructure}; use test_utilities::digitalocean::{ clean_environments, do_default_engine_config, DO_MANAGED_DATABASE_DISK_TYPE, DO_MANAGED_DATABASE_INSTANCE_TYPE, @@ -279,7 +281,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { environment.databases = vec![Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_db_name.clone(), version: "11.8.0".to_string(), fqdn_id: database_host.clone(), @@ -355,7 +357,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(ret, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let 
database_name = format!("postgresql-{}-0", &environment_check.databases[0].id); + let database_name = format!("postgresql-{}-0", to_short_id(&environment_check.databases[0].long_id)); match is_pod_restarted_env( context.clone(), ProviderKind::Do, diff --git a/tests/scaleway/scw_databases.rs b/tests/scaleway/scw_databases.rs index 8de7ff7e..c56ab6bf 100644 --- a/tests/scaleway/scw_databases.rs +++ b/tests/scaleway/scw_databases.rs @@ -1,5 +1,6 @@ use ::function_name::named; use tracing::{span, warn, Level}; +use uuid::Uuid; use qovery_engine::cloud_provider::{Kind as ProviderKind, Kind}; use qovery_engine::io_models::{Action, CloneForTest, Database, DatabaseKind, DatabaseMode, Port, Protocol}; @@ -10,6 +11,7 @@ use test_utilities::utilities::{ }; use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; +use qovery_engine::utilities::to_short_id; use test_utilities::common::test_db; use test_utilities::common::{database_test_environment, Infrastructure}; use test_utilities::scaleway::{ @@ -284,7 +286,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { environment.databases = vec![Database { kind: DatabaseKind::Postgresql, action: Action::Create, - id: generate_id(), + long_id: Uuid::new_v4(), name: database_db_name.clone(), version: "11.8.0".to_string(), fqdn_id: database_host.clone(), @@ -360,7 +362,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() { assert!(matches!(result, TransactionResult::Ok)); // TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY - let database_name = format!("postgresql-{}-0", &environment_check.databases[0].id); + let database_name = format!("postgresql-{}-0", to_short_id(&environment_check.databases[0].long_id)); match is_pod_restarted_env( context.clone(), ProviderKind::Scw, From 80fd9fc0702f6a204530f5bf4381c0585545d8f5 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 29 Apr 2022 16:01:35 +0200 Subject: [PATCH 090/122] Fix long_id for router --- src/cloud_provider/service.rs | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs index defbddf2..46b7844e 100644 --- a/src/cloud_provider/service.rs +++ b/src/cloud_provider/service.rs @@ -330,7 +330,7 @@ pub fn default_tera_context( ) -> TeraContext { let mut context = TeraContext::new(); context.insert("id", service.id()); - context.insert("long_id", service.id()); + context.insert("long_id", service.long_id()); context.insert("owner_id", environment.owner_id.as_str()); context.insert("project_id", environment.project_id.as_str()); context.insert("organization_id", environment.organization_id.as_str()); From e3439eff8efbe97dc67d247484f6294aecdeab96 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 29 Apr 2022 16:12:20 +0200 Subject: [PATCH 091/122] Remove duplicate long_id --- src/models/aws/database.rs | 1 - src/models/database.rs | 1 - src/models/scaleway/database.rs | 1 - 3 files changed, 3 deletions(-) diff --git a/src/models/aws/database.rs b/src/models/aws/database.rs index 71c90b11..a5237851 100644 --- a/src/models/aws/database.rs +++ b/src/models/aws/database.rs @@ -176,7 +176,6 @@ where ); context.insert("namespace", environment.namespace()); - context.insert("long_id", &self.long_id); let version = self .get_version_aws_managed(event_details)? 
diff --git a/src/models/database.rs b/src/models/database.rs index 8d937bbd..589d87f5 100644 --- a/src/models/database.rs +++ b/src/models/database.rs @@ -487,7 +487,6 @@ where context.insert("kubernetes_cluster_id", kubernetes.id()); context.insert("kubernetes_cluster_name", kubernetes.name()); - context.insert("long_id", &self.long_id); context.insert("fqdn_id", self.fqdn_id.as_str()); context.insert("fqdn", self.fqdn(target, &self.fqdn, M::is_managed()).as_str()); context.insert("service_name", self.fqdn_id.as_str()); diff --git a/src/models/scaleway/database.rs b/src/models/scaleway/database.rs index 30efe5a5..2649942a 100644 --- a/src/models/scaleway/database.rs +++ b/src/models/scaleway/database.rs @@ -139,7 +139,6 @@ impl> Database { ); context.insert("namespace", environment.namespace()); - context.insert("long_id", &self.long_id); let version = get_version(event_details)?.matched_version(); context.insert("version_major", &version.to_major_version_string()); From ea6d0ecb44d1af9d1706a4d3085c713274458ef8 Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 29 Apr 2022 16:35:11 +0200 Subject: [PATCH 092/122] Revert database pvc annotation changes --- lib/aws/chart_values/mongodb/q-values.j2.yaml | 3 ++- lib/aws/chart_values/mysql/q-values.j2.yaml | 3 ++- lib/aws/chart_values/postgresql/q-values.j2.yaml | 1 + lib/digitalocean/chart_values/mongodb/q-values.j2.yaml | 3 ++- lib/digitalocean/chart_values/mysql/q-values.j2.yaml | 3 ++- lib/digitalocean/chart_values/postgresql/q-values.j2.yaml | 3 ++- lib/scaleway/chart_values/mongodb/q-values.j2.yaml | 3 ++- lib/scaleway/chart_values/mysql/q-values.j2.yaml | 3 ++- lib/scaleway/chart_values/postgresql/q-values.j2.yaml | 3 ++- 9 files changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/aws/chart_values/mongodb/q-values.j2.yaml b/lib/aws/chart_values/mongodb/q-values.j2.yaml index 461ed841..3e513b15 100644 --- a/lib/aws/chart_values/mongodb/q-values.j2.yaml +++ 
b/lib/aws/chart_values/mongodb/q-values.j2.yaml @@ -344,9 +344,10 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: {{ sanitized_name }} ## Configure the ingress resource that allows you to access the ## MongoDB installation. Set up the URL diff --git a/lib/aws/chart_values/mysql/q-values.j2.yaml b/lib/aws/chart_values/mysql/q-values.j2.yaml index 15e57bad..218cf192 100644 --- a/lib/aws/chart_values/mysql/q-values.j2.yaml +++ b/lib/aws/chart_values/mysql/q-values.j2.yaml @@ -314,9 +314,10 @@ master: ## PVC annotations ## annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: {{ sanitized_name }} ## Persistent Volume Access Mode ## diff --git a/lib/aws/chart_values/postgresql/q-values.j2.yaml b/lib/aws/chart_values/postgresql/q-values.j2.yaml index be1475e2..886a36b8 100644 --- a/lib/aws/chart_values/postgresql/q-values.j2.yaml +++ b/lib/aws/chart_values/postgresql/q-values.j2.yaml @@ -326,6 +326,7 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} databaseLongId: {{ long_id }} diff --git a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml index 273f0404..409ff2ec 100644 --- a/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mongodb/q-values.j2.yaml @@ -347,9 +347,10 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' ## Configure the ingress resource that allows you to access the ## MongoDB installation. 
Set up the URL diff --git a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml index 4a68de80..3c8ec5db 100644 --- a/lib/digitalocean/chart_values/mysql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/mysql/q-values.j2.yaml @@ -314,9 +314,10 @@ master: ## PVC annotations ## annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' ## Persistent Volume Access Mode ## diff --git a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml index ad87ddcc..22c1974e 100644 --- a/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml +++ b/lib/digitalocean/chart_values/postgresql/q-values.j2.yaml @@ -328,9 +328,10 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' ## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies diff --git a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml index e1a609f6..e5a0d7d9 100644 --- a/lib/scaleway/chart_values/mongodb/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mongodb/q-values.j2.yaml @@ -196,9 +196,10 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' # volume.beta.kubernetes.io/storage-class=scw-sbv-ssd-0: ## Prometheus Exporter / Metrics diff --git a/lib/scaleway/chart_values/mysql/q-values.j2.yaml b/lib/scaleway/chart_values/mysql/q-values.j2.yaml index 7d66a8f4..612b39cd 100644 --- 
a/lib/scaleway/chart_values/mysql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/mysql/q-values.j2.yaml @@ -292,9 +292,10 @@ master: ## PVC annotations ## annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' ## Persistent Volume Access Mode ## diff --git a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml index b1a83685..9ca8052f 100644 --- a/lib/scaleway/chart_values/postgresql/q-values.j2.yaml +++ b/lib/scaleway/chart_values/postgresql/q-values.j2.yaml @@ -169,9 +169,10 @@ persistence: - ReadWriteOnce size: {{ database_disk_size_in_gib }}Gi annotations: + ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: '{{ sanitized_name }}' ## ## PostgreSQL Master parameters From c9616927341bf4b8ac8b36d2e1edc3c6894414db Mon Sep 17 00:00:00 2001 From: Romain GERARD Date: Fri, 29 Apr 2022 17:03:38 +0200 Subject: [PATCH 093/122] Revert database pvc annotation changes --- lib/aws/chart_values/postgresql/q-values.j2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/aws/chart_values/postgresql/q-values.j2.yaml b/lib/aws/chart_values/postgresql/q-values.j2.yaml index 886a36b8..1c11b3c8 100644 --- a/lib/aws/chart_values/postgresql/q-values.j2.yaml +++ b/lib/aws/chart_values/postgresql/q-values.j2.yaml @@ -329,7 +329,7 @@ persistence: ownerId: {{ owner_id }} envId: {{ environment_id }} databaseId: {{ id }} - databaseLongId: {{ long_id }} + databaseName: {{ sanitized_name }} ## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies From 6e7638828c1496003b79150a42262419bf6ec87f Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Fri, 29 Apr 2022 17:48:56 +0200 Subject: [PATCH 094/122] feat: aws kubeconfig 
retrieval improvment This CL tries to introduce a better handling of S3 eventual consitency. https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel Issue was at cluster creation, we try to get a kubeconfig which doesn't exists yet in order to know whether or not the cluster exists and needs to be upgraded. It leads to S3 caching the fact this bucket doesn't exists and reject later s# get while the bucket actually exists. By introducing a retry mechanism, we allow s3 to properly propagate new file / bucket creation and reach consistency. --- .../bootstrap/charts/aws-calico/.helmignore | 22 ++ .../bootstrap/charts/aws-calico/Chart.yaml | 6 + .../bootstrap/charts/aws-calico/README.md | 66 ++++++ .../charts/aws-calico/crds/crds.yaml | 214 ++++++++++++++++++ .../charts/aws-calico/templates/_helpers.tpl | 55 +++++ .../aws-calico/templates/config-map.yaml | 22 ++ .../aws-calico/templates/daemon-set.yaml | 142 ++++++++++++ .../aws-calico/templates/deployment.yaml | 128 +++++++++++ .../templates/pod-disruption-budget.yaml | 13 ++ .../templates/podsecuritypolicy.yaml | 211 +++++++++++++++++ .../charts/aws-calico/templates/rbac.yaml | 214 ++++++++++++++++++ .../templates/service-accounts.yaml | 18 ++ .../charts/aws-calico/templates/service.yaml | 15 ++ .../bootstrap/charts/aws-calico/values.yaml | 54 +++++ .../charts/aws-limits-exporter/.helmignore | 23 ++ .../charts/aws-limits-exporter/Chart.yaml | 6 + .../templates/_helpers.tpl | 63 ++++++ .../templates/deployment.yaml | 67 ++++++ .../templates/secrets.yaml | 8 + .../templates/service.yaml | 16 ++ .../templates/serviceaccount.yaml | 12 + .../templates/servicemonitor.yaml | 19 ++ .../charts/aws-limits-exporter/values.yaml | 65 ++++++ .../aws-node-termination-handler/.helmignore | 22 ++ .../aws-node-termination-handler/Chart.yaml | 27 +++ .../aws-node-termination-handler/README.md | 96 ++++++++ .../templates/_helpers.tpl | 57 +++++ .../templates/clusterrole.yaml | 37 +++ 
.../templates/clusterrolebinding.yaml | 12 + .../templates/daemonset.yaml | 141 ++++++++++++ .../templates/psp.yaml | 57 +++++ .../templates/serviceaccount.yaml | 13 ++ .../aws-node-termination-handler/values.yaml | 102 +++++++++ .../bootstrap/charts/aws-ui-view/.helmignore | 22 ++ .../bootstrap/charts/aws-ui-view/Chart.yaml | 5 + .../charts/aws-ui-view/templates/_helpers.tpl | 47 ++++ .../aws-ui-view/templates/clusterrole.yaml | 35 +++ .../templates/clusterrolebinding.yaml | 12 + .../bootstrap/charts/aws-ui-view/values.yaml | 3 + .../bootstrap/charts/aws-vpc-cni/.helmignore | 22 ++ .../bootstrap/charts/aws-vpc-cni/Chart.yaml | 18 ++ .../bootstrap/charts/aws-vpc-cni/README.md | 94 ++++++++ .../charts/aws-vpc-cni/templates/_helpers.tpl | 57 +++++ .../aws-vpc-cni/templates/clusterrole.yaml | 25 ++ .../templates/clusterrolebinding.yaml | 14 ++ .../aws-vpc-cni/templates/configmap.yaml | 10 + .../templates/customresourcedefinition.yaml | 19 ++ .../aws-vpc-cni/templates/daemonset.yaml | 138 +++++++++++ .../aws-vpc-cni/templates/eniconfig.yaml | 17 ++ .../aws-vpc-cni/templates/serviceaccount.yaml | 12 + .../bootstrap/charts/aws-vpc-cni/values.yaml | 161 +++++++++++++ .../charts/coredns-config/.helmignore | 23 ++ .../charts/coredns-config/Chart.yaml | 6 + .../coredns-config/templates/_helpers.tpl | 62 +++++ .../coredns-config/templates/configmap.yml | 31 +++ .../charts/coredns-config/values.yaml | 4 + .../charts/iam-eks-user-mapper/.helmignore | 23 ++ .../charts/iam-eks-user-mapper/Chart.yaml | 6 + .../templates/_helpers.tpl | 63 ++++++ .../templates/deployment.yaml | 65 ++++++ .../iam-eks-user-mapper/templates/rbac.yaml | 24 ++ .../iam-eks-user-mapper/templates/secret.yaml | 10 + .../templates/serviceaccount.yaml | 13 ++ .../charts/iam-eks-user-mapper/values.yaml | 65 ++++++ .../charts/q-storageclass/.helmignore | 23 ++ .../charts/q-storageclass/Chart.yaml | 23 ++ .../q-storageclass/templates/_helpers.tpl | 63 ++++++ .../templates/storageclass.yaml | 64 ++++++ 
.../charts/q-storageclass/values.yaml | 0 src/cloud_provider/kubernetes.rs | 23 +- 70 files changed, 3318 insertions(+), 7 deletions(-) create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/README.md create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml create 
mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml create mode 100644 
lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl create mode 100644 
lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore new file mode 100644 index 00000000..5ae7e8be --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +crds/kustomization.yaml diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml new file mode 100644 index 00000000..40ab5de7 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +appVersion: 3.13.4 +description: A Helm chart for installing Calico on AWS +icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png +name: aws-calico +version: 0.3.1 diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/README.md b/lib/aws-ec2/bootstrap/charts/aws-calico/README.md new file mode 100644 index 00000000..9abbca69 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/README.md @@ -0,0 +1,66 @@ +# Calico on AWS + +This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html + +## Prerequisites + +- Kubernetes 1.11+ running on AWS + +## Installing the Chart + +First add the EKS repository to Helm: + +```shell +helm repo add eks https://aws.github.io/eks-charts +``` + +Install the Calico CRDs: + +```shell +kubectl apply -k github.com/aws/eks-charts/tree/master/stable/aws-calico/crds +``` + +To install the chart with the release name `aws-calico` and 
default configuration: + +```shell +$ helm install --name aws-calico --namespace kube-system eks/aws-calico +``` + +To install into an EKS cluster where the CNI is already installed, you can run: + +```shell +helm upgrade --install --recreate-pods --force aws-calico --namespace kube-system eks/aws-calico +``` + +If you receive an error similar to `Error: release aws-calico failed: "aws-calico" already exists`, simply rerun the above command. + +## Configuration + +The following table lists the configurable parameters for this chart and their default values. + +| Parameter | Description | Default | +|----------------------------------------|---------------------------------------------------------|---------------------------------| +| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | +| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | +| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | +| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | +| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | +| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | +| `calico.node.logseverity` | Calico Node Log Severity | `Info` | +| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | +| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.tag` | Calico version | `v3.8.1` | +| `fullnameOverride` | Override the fullname of the chart | `calico` | +| `podSecurityPolicy.create` | Specifies 
whether podSecurityPolicy and related rbac objects should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | +| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: + +```shell +$ helm install --name aws-calico --namespace kube-system eks/aws-calico --values values.yaml +``` diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml new file mode 100644 index 00000000..73fe142f --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml @@ -0,0 +1,214 @@ +# Create all the CustomResourceDefinitions needed for +# Calico policy-only mode. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 
+kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + versions: + - name: v1 + served: true + storage: true + names: + kind: NetworkSet + plural: networksets + singular: networkset \ No 
newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl new file mode 100644 index 00000000..0a18027c --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl @@ -0,0 +1,55 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-calico.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-calico.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-calico.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-calico.labels" -}} +helm.sh/chart: {{ include "aws-calico.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-calico.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-calico.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml new file mode 100644 index 00000000..9a3cfaa5 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml @@ -0,0 +1,22 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +data: + ladder: |- + { + "coresToReplicas": [], + "nodesToReplicas": + [ + [1, 1], + [10, 2], + [100, 3], + [250, 4], + [500, 5], + [1000, 6], + [1500, 7], + [2000, 8] + ] + } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml new file mode 100644 index 00000000..ce553146 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml @@ -0,0 +1,142 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + spec: + priorityClassName: system-node-critical + nodeSelector: + {{- toYaml .Values.calico.node.nodeSelector | nindent 8 }} + hostNetwork: true + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . 
}}-node" + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: "{{ .Values.calico.node.image }}:{{ .Values.calico.tag }}" + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Use eni not cali for interface prefix + - name: FELIX_INTERFACEPREFIX + value: "eni" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "{{ .Values.calico.node.logseverity }}" + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,ecs" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + - name: FELIX_TYPHAK8SSERVICENAME + value: "calico-typha" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # This will make Felix honor AWS VPC CNI's mangle table + # rules. + - name: FELIX_IPTABLESMANGLEALLOWACTION + value: Return + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + - name: FELIX_LOGSEVERITYSYS + value: "none" + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "true" + - name: NO_DEFAULT_POOLS + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. 
+ - name: IP + value: "" + - name: FELIX_HEALTHENABLED + value: "true" + {{- if .Values.calico.node.extraEnv }} + {{- toYaml .Values.calico.node.extraEnv | nindent 12 }} + {{- end }} + securityContext: + privileged: true + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + periodSeconds: 10 + resources: + {{- toYaml .Values.calico.node.resources | nindent 12 }} + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + volumes: + # Used to ensure proper kmods are installed. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + tolerations: + # Make sure calico/node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml new file mode 100644 index 00000000..a879a8d2 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml @@ -0,0 +1,128 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + revisionHistoryLimit: 2 + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + priorityClassName: system-cluster-critical + nodeSelector: + {{- toYaml .Values.calico.typha.nodeSelector | nindent 8 }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + {{- if .Values.calico.typha.tolerations }} +{{ toYaml .Values.calico.typha.tolerations | indent 10 }} + {{- end }} + hostNetwork: true + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: "{{ .Values.calico.typha.image }}:{{ .Values.calico.tag }}" + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Use eni not cali for interface prefix + - name: FELIX_INTERFACEPREFIX + value: "eni" + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + - name: TYPHA_LOGSEVERITYSCREEN + value: "{{ .Values.calico.typha.logseverity }}" + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "true" + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "9093" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_MAXCONNECTIONSLOWERLIMIT + value: "1" + - name: TYPHA_HEALTHENABLED + value: "true" + # This will make Felix honor AWS VPC CNI's mangle table + # rules. 
+ - name: FELIX_IPTABLESMANGLEALLOWACTION + value: Return + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + resources: + {{- toYaml .Values.calico.typha.resources | nindent 12 }} + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + spec: + priorityClassName: system-cluster-critical + nodeSelector: + {{- toYaml .Values.calico.typha_autoscaler.nodeSelector | nindent 8 }} + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + {{- if .Values.calico.typha_autoscaler.tolerations }} +{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }} + {{- end }} + containers: + - image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}" + name: autoscaler + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler + - --target=deployment/{{ include "aws-calico.fullname" . }}-typha + - --logtostderr=true + - --v=2 + resources: + {{- toYaml .Values.calico.typha_autoscaler.resources | nindent 12 }} + serviceAccountName: "{{ include "aws-calico.serviceAccountName" . 
}}-typha-cpha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml new file mode 100644 index 00000000..8635b315 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml @@ -0,0 +1,13 @@ +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000..c946ee71 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml @@ -0,0 +1,211 @@ +{{- if .Values.podSecurityPolicy.create -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-node + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - ALL + hostNetwork: true + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + - 'hostPath' + allowedHostPaths: + - pathPrefix: "/lib/modules" + readOnly: false + - pathPrefix: "/var/run/calico" + readOnly: false + - pathPrefix: "/var/lib/calico" + readOnly: false + - pathPrefix: "/run/xtables.lock" + readOnly: false + runAsUser: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-typha + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + hostNetwork: true + hostPorts: + - max: 5473 + min: 5473 + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . 
| indent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-node-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-node +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-typha +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . | indent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-node-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node +{{ include "aws-calico.labels" . 
| indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-node-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-node + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-typha-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-node + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + labels: + app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + kind: Role + name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ include "aws-calico.serviceAccountName" . }}-typha-cpha + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml new file mode 100644 index 00000000..7caa7fa4 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml @@ -0,0 +1,214 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . 
| indent 4 }} +rules: + # The CNI plugin needs to get pods, nodes, namespaces, and configmaps. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-node" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ include "aws-calico.fullname" . }}-node" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-node" + namespace: {{ .Release.Namespace }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + namespace: {{ .Release.Namespace }} + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . 
| indent 4 }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale"] + verbs: ["get", "update"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" + labels: +{{ include "aws-calico.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +subjects: + - kind: ServiceAccount + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + namespace: "{{ .Release.Namespace }}" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml new file mode 100644 index 00000000..21409395 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml @@ -0,0 +1,18 @@ +# Create the ServiceAccount and roles necessary for Calico. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ include "aws-calico.serviceAccountName" . }}-node" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" +{{ include "aws-calico.labels" . | indent 4 }} + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-cpha" +{{ include "aws-calico.labels" . 
| indent 4 }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml new file mode 100644 index 00000000..4edb632d --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "{{ include "aws-calico.fullname" . }}-typha" + labels: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" +{{ include "aws-calico.labels" . | indent 4 }} +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: "{{ include "aws-calico.fullname" . }}-typha" + selector: + app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml new file mode 100644 index 00000000..c192e92e --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml @@ -0,0 +1,54 @@ +fullnameOverride: calico + +serviceAccount: + create: true + +podSecurityPolicy: + create: false + +calico: + tag: v3.13.4 + + typha: + logseverity: Info #Debug, Info, Warning, Error, Fatal + image: quay.io/calico/typha + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "96Mi" + cpu: "100m" + tolerations: [] + nodeSelector: + beta.kubernetes.io/os: linux + node: + logseverity: Info #Debug, Info, Warning, Error, Fatal + image: quay.io/calico/node + resources: + requests: + memory: "32Mi" + cpu: "20m" + limits: + memory: "64Mi" + cpu: "100m" + extraEnv: [] + # - name: SOME_VAR + # value: 'some value' + nodeSelector: + beta.kubernetes.io/os: linux + typha_autoscaler: + resources: + requests: + memory: "16Mi" + cpu: "10m" + limits: + memory: "32Mi" + cpu: "10m" + tolerations: [] + nodeSelector: + beta.kubernetes.io/os: linux + +autoscaler: + tag: "1.7.1" + image: k8s.gcr.io/cluster-proportional-autoscaler-amd64 diff --git 
a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml new file mode 100644 index 00000000..c6ebc029 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: aws-limits-exporter +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: 0.3.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl new file mode 100644 index 00000000..bf005806 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-limits-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "aws-limits-exporter.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-limits-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "aws-limits-exporter.labels" -}} +helm.sh/chart: {{ include "aws-limits-exporter.chart" . }} +{{ include "aws-limits-exporter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "aws-limits-exporter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "aws-limits-exporter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-limits-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "aws-limits-exporter.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml new file mode 100644 index 00000000..eddcd329 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "aws-limits-exporter.fullname" . 
}} + labels: + {{- include "aws-limits-exporter.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "aws-limits-exporter.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "aws-limits-exporter.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-limits-exporter.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + env: + - name: AWS_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-limits-exporter + key: awsAccessKey + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-limits-exporter + key: awsSecretKey + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 20 + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml new file mode 100644 index 00000000..b7bd2cd9 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: aws-limits-exporter +type: Opaque +data: + awsAccessKey: {{ .Values.awsCredentials.awsAccessKey | b64enc}} + awsSecretKey: {{ .Values.awsCredentials.awsSecretKey | b64enc}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml new file mode 100644 index 00000000..0f89dc31 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "aws-limits-exporter.fullname" . }} + labels: + app: aws-limits-exporter + {{- include "aws-limits-exporter.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + {{- include "aws-limits-exporter.selectorLabels" . | nindent 4 }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml new file mode 100644 index 00000000..2bc9d0ee --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "aws-limits-exporter.serviceAccountName" . }} + labels: + {{- include "aws-limits-exporter.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml new file mode 100644 index 00000000..4b1423fb --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml @@ -0,0 +1,19 @@ +{{- if .Values.prometheusScraping.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "aws-limits-exporter.fullname" . }} + labels: + {{- include "aws-limits-exporter.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: aws-limits-exporter + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http-metrics + path: /metrics + interval: "{{ .Values.prometheusScraping.scrapInterval }}" +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml new file mode 100644 index 00000000..3726aefd --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml @@ -0,0 +1,65 @@ +replicaCount: 1 + +image: + repository: danielfm/aws-limits-exporter + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart version. + tag: "0.3.0" + +imagePullSecrets: [] +nameOverride: "aws-limits-exporter" +fullnameOverride: "aws-limits-exporter" + +awsCredentials: + awsAccessKey: "" + awsSecretKey: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +labels: + app.kubernetes.io/name: aws-limits-exporter + +selectorLabels: + app.kubernetes.io/name: aws-limits-exporter + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 8080 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +prometheusScraping: + enabled: true + scrapInterval: "60s" diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml new file mode 100644 index 00000000..47ed2baa --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +appVersion: 1.5.0 +description: A Helm chart for the AWS Node Termination Handler +home: https://github.com/aws/eks-charts +icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png +keywords: +- eks +- ec2 +- node-termination +- spot +maintainers: +- email: nckturner@users.noreply.github.com + name: Nicholas Turner + url: https://github.com/nckturner +- email: stefanprodan@users.noreply.github.com + name: Stefan Prodan + url: https://github.com/stefanprodan +- email: jillmon@users.noreply.github.com + name: Jillian Montalvo + url: https://github.com/jillmon +- email: mattrandallbecker@users.noreply.github.com + name: Matthew Becker + url: https://github.com/mattrandallbecker +name: aws-node-termination-handler +sources: +- https://github.com/aws/eks-charts +version: 0.8.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md new file mode 100644 index 00000000..f1847304 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md @@ -0,0 +1,96 @@ +# AWS Node Termination Handler + +AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler. 
+ +## Prerequisites + +* Kubernetes >= 1.11 + +## Installing the Chart + +Add the EKS repository to Helm: +```sh +helm repo add eks https://aws.github.io/eks-charts +``` +Install AWS Node Termination Handler: +To install the chart with the release name aws-node-termination-handler and default configuration: + +```sh +helm install --name aws-node-termination-handler \ + --namespace kube-system eks/aws-node-termination-handler +``` + +To install into an EKS cluster where the Node Termination Handler is already installed, you can run: + +```sh +helm upgrade --install --recreate-pods --force \ + aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler +``` + +If you receive an error similar to `Error: release aws-node-termination-handler +failed: "aws-node-termination-handler" already exists`, simply rerun +the above command. + +The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `aws-node-termination-handler` deployment: + +```sh +helm delete --purge aws-node-termination-handler +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`image.repository` | image repository | `amazon/aws-node-termination-handler` +`image.tag` | image tag | `` +`image.pullPolicy` | image pull policy | `IfNotPresent` +`image.pullSecrets` | image pull secrets (for private docker registries) | `[]` +`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false` +`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used. | `30` +`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` +`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` +`ignoreDaemonSets` | Causes kubectl to skip daemon set managed pods | `true` +`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80` +`webhookURL` | Posts event data to URL upon instance interruption action | `` +`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | `` +`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}` +`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}` +`dryRun` | If true, only log if a node would be drained | `false` +`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false` +`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true` +`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3` +`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. | `false` +`taintNode` | If true, nodes will be tainted when an interruption event occurs.
Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false` +`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false` +`affinity` | node/pod affinities | None +`podAnnotations` | annotations to add to each pod | `{}` +`priorityClassName` | Name of the priorityClass | `system-node-critical` +`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi` +`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet` +`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. | `{}` +`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]` +`rbac.create` | if `true`, create and use RBAC resources | `true` +`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` +`serviceAccount.create` | If `true`, create a new service account | `true` +`serviceAccount.name` | Service account to be used | None +`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` +`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime` +`securityContext.runAsUserID` | User ID to run the container | `1000` +`securityContext.runAsGroupID` | Group ID to run the container | `1000` +`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os` +`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch` +`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false` +`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. 
| `9092` + +## Metrics endpoint consideration +If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace +since it's listening on all interfaces. +Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl new file mode 100644 index 00000000..902844a7 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-node-termination-handler.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-node-termination-handler.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-node-termination-handler.labels" -}} +app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} +helm.sh/chart: {{ include "aws-node-termination-handler.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node-termination-handler +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-node-termination-handler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-node-termination-handler.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-node-termination-handler.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml new file mode 100644 index 00000000..dc800866 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml @@ -0,0 +1,37 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "aws-node-termination-handler.fullname" . 
}} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..b5c25327 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "aws-node-termination-handler.fullname" . }} + apiGroup: rbac.authorization.k8s.io diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml new file mode 100644 index 00000000..fb220022 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml @@ -0,0 +1,141 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +spec: + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + k8s-app: aws-node-termination-handler + spec: + volumes: + - name: "uptime" + hostPath: + path: "{{ .Values.procUptimeFile }}" + priorityClassName: "{{ .Values.priorityClassName }}" + affinity: + nodeAffinity: + # NOTE(jaypipes): Change when we complete + # https://github.com/aws/aws-node-termination-handler/issues/8 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }} + operator: In + values: + - linux + - key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }} + operator: In + values: + - amd64 + - arm + - arm64 + serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }} + hostNetwork: true + dnsPolicy: {{ .Values.dnsPolicy }} + containers: + - name: {{ include "aws-node-termination-handler.name" . 
}} + image: {{ .Values.image.repository}}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: {{ .Values.securityContext.runAsUserID }} + runAsGroup: {{ .Values.securityContext.runAsGroupID }} + allowPrivilegeEscalation: false + volumeMounts: + - name: "uptime" + mountPath: "/proc/uptime" + readOnly: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SPOT_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: GRACE_PERIOD + value: {{ .Values.gracePeriod | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: INSTANCE_METADATA_URL + value: {{ .Values.instanceMetadataURL | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: WEBHOOK_URL + value: {{ .Values.webhookURL | quote }} + - name: WEBHOOK_HEADERS + value: {{ .Values.webhookHeaders | quote }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: {{ .Values.enableSpotInterruptionDraining | quote }} + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: {{ .Values.enableScheduledEventDraining | quote }} + - name: METADATA_TRIES + value: {{ .Values.metadataTries | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: WEBHOOK_PROXY + value: {{ 
.Values.webhookProxy | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml new file mode 100644 index 00000000..0eda5002 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }} + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: false + hostIPC: false + hostNetwork: true + hostPID: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }}-psp + labels: +{{ include "aws-node-termination-handler.labels" . 
| indent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "aws-node-termination-handler.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }}-psp + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "aws-node-termination-handler.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml new file mode 100644 index 00000000..55f2d766 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml new file mode 100644 index 00000000..469a51e4 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml @@ -0,0 +1,102 @@ +# Default values for aws-node-termination-handler. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: amazon/aws-node-termination-handler + tag: v1.5.0 + pullPolicy: IfNotPresent + pullSecrets: [] + +securityContext: + runAsUserID: 1000 + runAsGroupID: 1000 + +nameOverride: "" +fullnameOverride: "" + +priorityClassName: system-node-critical + +podAnnotations: {} + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is received +enableSpotInterruptionDraining: "" + +## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event +enableScheduledEventDraining: "" + +taintNode: false + +## dryRun tells node-termination-handler to only log calls to kubernetes control plane +dryRun: false + +# deleteLocalData tells kubectl to continue even if there are pods using +# emptyDir (local data that will be deleted when the node is drained). +deleteLocalData: "" + +# ignoreDaemonSets causes kubectl to skip Daemon Set managed pods. +ignoreDaemonSets: "" + +# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully. +# If negative, the default value specified in the pod will be used. +gracePeriod: "" +podTerminationGracePeriod: "" + +# nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. +nodeTerminationGracePeriod: "" + +# webhookURL if specified, posts event data to URL upon instance interruption action. +webhookURL: "" + +# webhookProxy if specified, uses this HTTP(S) proxy configuration. +webhookProxy: "" + +# webhookHeaders if specified, replaces the default webhook headers. 
+webhookHeaders: "" + +# webhookTemplate if specified, replaces the default webhook message template. +webhookTemplate: "" + +# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80) +instanceMetadataURL: "" + +# (TESTING USE): Mount path for uptime file +procUptimeFile: "/proc/uptime" + +# nodeSelector tells the daemonset where to place the node-termination-handler +# pods. By default, this value is empty and every node will receive a pod. +nodeSelector: {} + +nodeSelectorTermsOs: "" +nodeSelectorTermsArch: "" + +enablePrometheusServer: false +prometheusServerPort: "9092" + +tolerations: + - operator: "Exists" + +affinity: {} + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. If name is not set and create is true, + # a name is generated using fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +rbac: + # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created + pspEnabled: true + +dnsPolicy: "ClusterFirstWithHostNet" diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml new file mode 100644 index 00000000..6385e4d6 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1 +description: A Helm chart for the AWS UI View +name: aws-ui-view +version: 1.0.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl new file mode 100644 index 00000000..76e96336 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-ui-view.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-ui-view.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "aws-ui-view.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-ui-view.labels" -}} +app.kubernetes.io/name: {{ include "aws-ui-view.name" . }} +helm.sh/chart: {{ include "aws-ui-view.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml new file mode 100644 index 00000000..dff89b24 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: {{ include "aws-ui-view.fullname" . }} +rules: + - apiGroups: + - '*' + resources: + - nodes + - namespaces + - pods + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + - replicasets + verbs: + - get + - list + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..16802963 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "aws-ui-view.fullname" . 
}} +subjects: + - kind: Group + name: Admins + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ include "aws-ui-view.fullname" . }} + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml new file mode 100644 index 00000000..299bcc74 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml @@ -0,0 +1,3 @@ +nameOverride: aws-ui-view + +fullnameOverride: "aws-ui-view" \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml new file mode 100644 index 00000000..2f572eb2 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: v1.7.5 +description: A Helm chart for the AWS VPC CNI +home: https://github.com/aws/amazon-vpc-cni-k8s +icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png +keywords: +- eks +- cni +- networking +- vpc +maintainers: +- email: jayanthvn@users.noreply.github.com + name: Jayanth Varavani + url: https://github.com/jayanthvn +name: aws-vpc-cni +sources: +- https://github.com/aws/amazon-vpc-cni-k8s +version: 1.1.3 diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md 
b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md new file mode 100644 index 00000000..768f629d --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md @@ -0,0 +1,94 @@ +# AWS VPC CNI + +This chart installs the AWS CNI Daemonset: https://github.com/aws/amazon-vpc-cni-k8s + +## Prerequisites + +- Kubernetes 1.11+ running on AWS + +## Installing the Chart + +First add the EKS repository to Helm: + +```shell +helm repo add eks https://aws.github.io/eks-charts +``` + +To install the chart with the release name `aws-vpc-cni` and default configuration: + +```shell +$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni +``` + +To install into an EKS cluster where the CNI is already installed, see [this section below](#adopting-the-existing-aws-node-resources-in-an-eks-cluster) + +## Configuration + +The following table lists the configurable parameters for this chart and their default values. + +| Parameter | Description | Default | +| ------------------------|---------------------------------------------------------|-------------------------------------| +| `affinity` | Map of node/pod affinities | `{}` | +| `cniConfig.enabled` | Enable overriding the default 10-aws.conflist file | `false` | +| `cniConfig.fileContents`| The contents of the custom cni config file | `nil` | +| `eniConfig.create` | Specifies whether to create ENIConfig resource(s) | `false` | +| `eniConfig.region` | Region to use when generating ENIConfig resource names | `us-west-2` | +| `eniConfig.subnets` | A map of AZ identifiers to config per AZ | `nil` | +| `eniConfig.subnets.id` | The ID of the subnet within the AZ which will be used in the ENIConfig | `nil` | +| `eniConfig.subnets.securityGroups` | The IDs of the security groups which will be used in the ENIConfig | `nil` | +| `env` | List of environment variables. 
See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) | +| `fullnameOverride` | Override the fullname of the chart | `aws-node` | +| `image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | +| `image.tag` | Image tag | `v1.7.5` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.override` | A custom docker image to use | `nil` | +| `imagePullSecrets` | Docker registry pull secret | `[]` | +| `init.image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | +| `init.image.tag` | Image tag | `v1.7.5` | +| `init.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `init.image.override` | A custom docker image to use | `nil` | +| `init.env` | List of init container environment variables. See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) | +| `init.securityContext` | Init container Security context | `privileged: true` | +| `originalMatchLabels` | Use the original daemonset matchLabels | `false` | +| `nameOverride` | Override the name of the chart | `aws-node` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `podSecurityContext` | Pod Security Context | `{}` | +| `podAnnotations` | annotations to add to each pod | `{}` | +| `priorityClassName` | Name of the priorityClass | `system-node-critical` | +| `resources` | Resources for the pods | `requests.cpu: 10m` | +| `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` | +| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` | +| `livenessProbe` | Livenness probe settings for daemonset | (see `values.yaml`) | +| `readinessProbe` | Readiness probe settings for 
daemonset | (see `values.yaml`) | +| `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` | +| `tolerations` | Optional deployment tolerations | `[]` | +| `updateStrategy` | Optional update strategy | `type: RollingUpdate` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: + +```shell +$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni --values values.yaml +``` + +## Adopting the existing aws-node resources in an EKS cluster + +If you do not want to delete the existing aws-node resources in your cluster that run the aws-vpc-cni and then install this helm chart, you can adopt the resources into a release instead. This process is highlighted in this [PR comment](https://github.com/aws/eks-charts/issues/57#issuecomment-628403245). Once you have annotated and labeled all the resources this chart specifies, enable the `originalMatchLabels` flag, and also set `crd.create` to false on the helm release and run an update. If you have been careful, this should not diff and leave all the resources unmodified and now under management of helm. + +Here is an example script to modify the existing resources: + +WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release. +``` +#!/usr/bin/env bash + +set -euo pipefail + +# don't import the crd. Helm can't manage the lifecycle of it anyway. 
+for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do + echo "setting annotations and labels on $kind/aws-node" + kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE + kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system + kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm +done +``` diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl new file mode 100644 index 00000000..230aed77 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-vpc-cni.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-vpc-cni.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-vpc-cni.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-vpc-cni.labels" -}} +app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . 
}} +helm.sh/chart: {{ include "aws-vpc-cni.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-vpc-cni.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-vpc-cni.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml new file mode 100644 index 00000000..0635b5ed --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +rules: + - apiGroups: + - crd.k8s.amazonaws.com + resources: + - eniconfigs + verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - pods + - namespaces + verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch", "get", "update"] + - apiGroups: ["extensions"] + resources: + - '*' + verbs: ["list", "watch"] diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..5cadd1b1 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . 
| indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "aws-vpc-cni.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "aws-vpc-cni.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml new file mode 100644 index 00000000..401a8c19 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.cniConfig.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "aws-vpc-cni.fullname" . }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +data: + 10-aws.conflist: {{ .Values.cniConfig.fileContents | b64enc }} +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml new file mode 100644 index 00000000..bdd29e7a --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml @@ -0,0 +1,19 @@ +{{- if .Values.crd.create -}} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: eniconfigs.crd.k8s.amazonaws.com + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +spec: + scope: Cluster + group: crd.k8s.amazonaws.com + versions: + - name: v1alpha1 + served: true + storage: true + names: + plural: eniconfigs + singular: eniconfig + kind: ENIConfig +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml new file mode 100644 index 00000000..10388ef0 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml @@ -0,0 +1,138 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ include "aws-vpc-cni.fullname" . 
}} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +spec: + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + selector: + matchLabels: +{{- if .Values.originalMatchLabels }} + k8s-app: aws-node +{{- else }} + app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + k8s-app: aws-node + spec: + priorityClassName: "{{ .Values.priorityClassName }}" + serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }} + hostNetwork: true + initContainers: + - name: aws-vpc-cni-init + image: "{{- if .Values.init.image.override }}{{- .Values.init.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.init.image.region }}.amazonaws.com/amazon-k8s-cni-init:{{- .Values.init.image.tag }}{{- end}}" + imagePullPolicy: {{ .Values.init.image.pullPolicy }} + env: +{{- range $key, $value := .Values.init.env }} + - name: {{ $key }} + value: {{ $value | quote }} +{{- end }} + securityContext: + {{- toYaml .Values.init.securityContext | nindent 12 }} + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + terminationGracePeriodSeconds: 10 + tolerations: + - operator: Exists + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: aws-node + image: "{{- if .Values.image.override }}{{- .Values.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.image.region }}.amazonaws.com/amazon-k8s-cni:{{- .Values.image.tag }}{{- end}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 61678 + name: metrics + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 12 }} + env: +{{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} +{{- end }} + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir +{{- if .Values.cniConfig.enabled }} + # the dockerfile copies the baked in config to this location, lets overwrite it with ours + # the entrypoint.sh script will then copy our config to /host/etc/cni/net.d on boot + - name: cni-config + mountPath: /app/10-aws.conflist + subPath: 10-aws.conflist +{{- end }} + - mountPath: /host/var/log/aws-routed-eni + name: log-dir + - mountPath: /var/run/dockershim.sock + name: dockershim + - mountPath: /var/run/aws-node + name: run-dir + - mountPath: /run/xtables.lock + name: xtables-lock + volumes: + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d +{{- if .Values.cniConfig.enabled }} + - name: cni-config + configMap: + name: {{ include "aws-vpc-cni.fullname" . 
}} +{{- end }} + - name: dockershim + hostPath: + path: /var/run/dockershim.sock + - name: log-dir + hostPath: + path: /var/log/aws-routed-eni + type: DirectoryOrCreate + - name: run-dir + hostPath: + path: /var/run/aws-node + type: DirectoryOrCreate + - name: xtables-lock + hostPath: + path: /run/xtables.lock + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml new file mode 100644 index 00000000..6654ee60 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml @@ -0,0 +1,17 @@ +{{- if .Values.eniConfig.create }} +{{- range $key, $value := (required ".Values.eniConfig.subnets must be specified" .Values.eniConfig.subnets) }} +apiVersion: crd.k8s.amazonaws.com/v1alpha1 +kind: ENIConfig +metadata: + name: {{ required ".Values.eniConfig.region must be specified" $.Values.eniConfig.region }}{{ $key }} +spec: + {{- if $value.securityGroups }} + securityGroups: + {{- range $sg := $value.securityGroups }} + - {{ $sg }} + {{- end }} + {{- end }} + subnet: {{ $value.id }} +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml new file mode 100644 index 00000000..88515669 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "aws-vpc-cni.serviceAccountName" . }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} + labels: +{{ include "aws-vpc-cni.labels" . | indent 4 }} +{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml new file mode 100644 index 00000000..84388b40 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml @@ -0,0 +1,161 @@ +# Default values for aws-vpc-cni. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# This default name override is to maintain backwards compatibility with +# existing naming +nameOverride: aws-node + +init: + image: + tag: v1.7.5 + region: us-west-2 + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + env: + DISABLE_TCP_EARLY_DEMUX: "false" + securityContext: + privileged: true + +image: + region: us-west-2 + tag: v1.7.5 + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + +# The CNI supports a number of environment variable settings +# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables +env: + ADDITIONAL_ENI_TAGS: "{}" + AWS_VPC_CNI_NODE_PORT_SUPPORT: "true" + AWS_VPC_ENI_MTU: "9001" + AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false" + AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false" + AWS_VPC_K8S_CNI_EXTERNALSNAT: "false" + AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log" + AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG + AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng" + AWS_VPC_K8S_CNI_VETHPREFIX: eni + AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log" + AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG + DISABLE_INTROSPECTION: "false" + DISABLE_METRICS: "false" + ENABLE_POD_ENI: "false" + WARM_ENI_TARGET: "1" + +# this flag enables you to use the match label that was present in the original daemonset deployed by EKS +# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release +originalMatchLabels: false + +cniConfig: + enabled: false + fileContents: "" 
+ +imagePullSecrets: [] + +fullnameOverride: "aws-node" + +priorityClassName: system-node-critical + +podSecurityContext: {} + +podAnnotations: {} + +securityContext: + capabilities: + add: + - "NET_ADMIN" + +crd: + create: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +livenessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + initialDelaySeconds: 60 + +readinessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + initialDelaySeconds: 1 + +resources: + requests: + cpu: 10m + +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + +nodeSelector: {} + +tolerations: [] + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "beta.kubernetes.io/os" + operator: In + values: + - linux + - key: "beta.kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + +eniConfig: + # Specifies whether ENIConfigs should be created + create: false + region: us-west-2 + subnets: + # Key identifies the AZ + # Value contains the subnet ID and security group IDs within that AZ + # a: + # id: subnet-123 + # securityGroups: + # - sg-123 + # b: + # id: subnet-456 + # securityGroups: + # - sg-456 + # c: + # id: subnet-789 + # securityGroups: + # - sg-789 \ No newline at end of file diff --git 
a/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore b/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml b/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml new file mode 100644 index 00000000..6773a3f6 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: coredns-config +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: 0.1 diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl new file mode 100644 index 00000000..0c858639 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns-config.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "coredns-config.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "coredns-config.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "coredns-config.labels" -}} +helm.sh/chart: {{ include "coredns-config.chart" . }} +{{ include "coredns-config.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "coredns-config.selectorLabels" -}} +app.kubernetes.io/name: {{ include "coredns-config.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns-config.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "coredns-config.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml new file mode 100644 index 00000000..0cd29a18 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml @@ -0,0 +1,31 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: coredns + namespace: kube-system + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns +data: + Corefile: | + .:53 { + errors + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } + {{- range .Values.managed_dns }} + {{ . }}:53 { + errors + cache 30 + forward . {{ join " " $.Values.managed_dns_resolvers }} + } + {{ end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml b/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml new file mode 100644 index 00000000..843a6389 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml @@ -0,0 +1,4 @@ +# List of managed DNS +managed_dns: [] +# List of resolvers +managed_dns_resolvers: [] diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml new file mode 100644 index 00000000..e0583ff2 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: iam-eks-user-mapper +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl new file mode 100644 index 00000000..925c198e --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "iam-eks-user-mapper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "iam-eks-user-mapper.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "iam-eks-user-mapper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "iam-eks-user-mapper.labels" -}} +helm.sh/chart: {{ include "iam-eks-user-mapper.chart" . }} +{{ include "iam-eks-user-mapper.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "iam-eks-user-mapper.selectorLabels" -}} +app.kubernetes.io/name: {{ include "iam-eks-user-mapper.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "iam-eks-user-mapper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "iam-eks-user-mapper.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml new file mode 100644 index 00000000..c4c60995 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "iam-eks-user-mapper.fullname" . }} + namespace: kube-system + labels: + {{- include "iam-eks-user-mapper.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "iam-eks-user-mapper.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "iam-eks-user-mapper.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "iam-eks-user-mapper.serviceAccountName" . }} + automountServiceAccountToken: true + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_REGION + value: {{ .Values.aws.region }} + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.aws.accessKey }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: awsKey + name: {{ include "iam-eks-user-mapper.fullname" . }} + command: + - ./app + - --aws-iam-group + - {{ .Values.syncIamGroup }} + - --k8s-cap + - system:masters + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml new file mode 100644 index 00000000..1c82ee2c --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml @@ -0,0 +1,24 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: eks-configmap-modifier-role + namespace: kube-system +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "update"] + resourceNames: ["aws-auth"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: eks-configmap-modifier-rolebinding +subjects: + - kind: ServiceAccount + name: {{ include "iam-eks-user-mapper.serviceAccountName" . }} + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: eks-configmap-modifier-role \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml new file mode 100644 index 00000000..ca159331 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "iam-eks-user-mapper.fullname" . }} + namespace: kube-system + labels: + {{- include "iam-eks-user-mapper.labels" . 
| nindent 4 }} +type: Opaque +data: + awsKey: {{ .Values.aws.secretKey | b64enc }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml new file mode 100644 index 00000000..056ec349 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "iam-eks-user-mapper.serviceAccountName" . }} + namespace: kube-system + labels: + {{- include "iam-eks-user-mapper.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml new file mode 100644 index 00000000..82fc53dd --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml @@ -0,0 +1,65 @@ +# Default values for iam-eks-user-mapper. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: ygrene/iam-eks-user-mapper + pullPolicy: IfNotPresent + tag: "latest" + +aws: + accessKey: "" + secretKey: "" + region: "" + +syncIamGroup: "" + +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "iam-eks-user-mapper" + +labels: + app: iam-eks-user-mapper + +selectorLabels: + app: iam-eks-user-mapper + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore b/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml new file mode 100644 index 00000000..8227ff32 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: q-storageclass +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 0.1 diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl new file mode 100644 index 00000000..0edf0421 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "q-ebs-csi-config.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "q-ebs-csi-config.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "q-ebs-csi-config.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "q-ebs-csi-config.labels" -}} +helm.sh/chart: {{ include "q-ebs-csi-config.chart" . }} +{{ include "q-ebs-csi-config.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "q-ebs-csi-config.selectorLabels" -}} +app.kubernetes.io/name: {{ include "q-ebs-csi-config.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "q-ebs-csi-config.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "q-ebs-csi-config.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml new file mode 100644 index 00000000..abdd4923 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml @@ -0,0 +1,64 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-gp2-0 + labels: + aws-type: "gp2" + qovery-type: "ssd" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: gp2 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-io1-0 + labels: + aws-type: "io1" + qovery-type: "nvme" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: io1 + iopsPerGB: "32" + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-st1-0 + labels: + aws-type: "st1" + qovery-type: "hdd" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: st1 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-sc1-0 + labels: + aws-type: "sc1" + qovery-type: "cold" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: sc1 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml new file mode 100644 index 00000000..e69de29b diff --git 
a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 4ff9cf5c..c9a89cc9 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -123,29 +123,38 @@ pub trait Kubernetes: Listen { (StringPath::from(&local_kubeconfig_generated), kubeconfig_file) } - None => { + None => match retry::retry(Fibonacci::from_millis(5000).take(5), || { match self .config_file_store() .get(bucket_name.as_str(), object_key.as_str(), true) { - Ok((path, file)) => (path, file), + Ok((path, file)) => retry::OperationResult::Ok((path, file)), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage), + self.get_event_details(stage.clone()), err.into(), ); self.logger().log(EngineEvent::Error(error.clone(), None)); - return Err(error); + retry::OperationResult::Retry(error) } } - } + }) { + Ok((path, file)) => (path, file), + Err(Operation { error, .. }) => return Err(error), + Err(retry::Error::Internal(msg)) => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + self.get_event_details(stage.clone()), + CommandError::new("Error while trying to get kubeconfig file.".to_string(), Some(msg), None), + )) + } + }, }; let metadata = match file.metadata() { Ok(metadata) => metadata, Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage), + self.get_event_details(stage.clone()), CommandError::new_from_safe_message(format!("Error getting file metadata, error: {}", err,)), ); self.logger().log(EngineEvent::Error(error.clone(), None)); @@ -157,7 +166,7 @@ pub trait Kubernetes: Listen { permissions.set_mode(0o400); if let Err(err) = std::fs::set_permissions(string_path.as_str(), permissions) { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage), + self.get_event_details(stage.clone()), CommandError::new_from_safe_message(format!("Error setting file permissions, error: {}", err,)), 
); self.logger().log(EngineEvent::Error(error.clone(), None)); From 50fe0b8c1d3795e0b10d60934256031b66ff2190 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Fri, 29 Apr 2022 18:45:40 +0200 Subject: [PATCH 095/122] lint: remove redundant clone() --- src/cloud_provider/kubernetes.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index c9a89cc9..53681e74 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -131,7 +131,7 @@ pub trait Kubernetes: Listen { Ok((path, file)) => retry::OperationResult::Ok((path, file)), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), + self.get_event_details(stage), err.into(), ); self.logger().log(EngineEvent::Error(error.clone(), None)); @@ -143,7 +143,7 @@ pub trait Kubernetes: Listen { Err(Operation { error, .. }) => return Err(error), Err(retry::Error::Internal(msg)) => { return Err(EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), + self.get_event_details(stage), CommandError::new("Error while trying to get kubeconfig file.".to_string(), Some(msg), None), )) } @@ -154,7 +154,7 @@ pub trait Kubernetes: Listen { Ok(metadata) => metadata, Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), + self.get_event_details(stage), CommandError::new_from_safe_message(format!("Error getting file metadata, error: {}", err,)), ); self.logger().log(EngineEvent::Error(error.clone(), None)); @@ -166,7 +166,7 @@ pub trait Kubernetes: Listen { permissions.set_mode(0o400); if let Err(err) = std::fs::set_permissions(string_path.as_str(), permissions) { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage.clone()), + self.get_event_details(stage), 
CommandError::new_from_safe_message(format!("Error setting file permissions, error: {}", err,)), ); self.logger().log(EngineEvent::Error(error.clone(), None)); From 67e157d4a0f562b0e56a6a30eb6e02296644e815 Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Fri, 29 Apr 2022 19:16:05 +0200 Subject: [PATCH 096/122] build: fix moved value --- src/cloud_provider/kubernetes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 53681e74..acc96ce1 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -131,7 +131,7 @@ pub trait Kubernetes: Listen { Ok((path, file)) => retry::OperationResult::Ok((path, file)), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage), + self.get_event_details(stage.clone()), err.into(), ); self.logger().log(EngineEvent::Error(error.clone(), None)); From e665d96cdeb8f0c46f78441d387d17e9f672897e Mon Sep 17 00:00:00 2001 From: Romaric Philogene Date: Sun, 1 May 2022 10:27:07 +0200 Subject: [PATCH 097/122] chore: upgrade ec2 terraform libs --- lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf b/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf index e5235b07..aa805acc 100644 --- a/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf +++ b/lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf @@ -2,31 +2,31 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.11.0" + version = "~> 4.12.1" } external = { source = "hashicorp/external" - version = "~> 1.2" + version = "~> 2.2" } vault = { source = "hashicorp/vault" - version = "~> 2.24.1" + version = "~> 3.5" } local = { source = "hashicorp/local" - version = "~> 1.4" + version = "~> 2.2" } null = { source = "hashicorp/null" - version = "~> 2.1" + version = "~> 3.1" } random = { source = 
"hashicorp/random" - version = "~> 2.3" + version = "~> 3.1" } time = { source = "hashicorp/time" - version = "~> 0.3" + version = "~> 0.7" } } required_version = ">= 0.13" @@ -57,4 +57,4 @@ provider "vault" { } } {% endif %} -} \ No newline at end of file +} From 153f054123d63a0d0e24246b5f7f86343d26ff88 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Sun, 1 May 2022 19:25:50 +0200 Subject: [PATCH 098/122] feat: add kubeconfig to s3 + firewall rules + clean + EC2 helm charts --- .../bootstrap/charts/aws-calico/.helmignore | 22 - .../bootstrap/charts/aws-calico/Chart.yaml | 6 - .../bootstrap/charts/aws-calico/README.md | 66 --- .../charts/aws-calico/crds/crds.yaml | 214 --------- .../charts/aws-calico/templates/_helpers.tpl | 55 --- .../aws-calico/templates/config-map.yaml | 22 - .../aws-calico/templates/daemon-set.yaml | 142 ------ .../aws-calico/templates/deployment.yaml | 128 ------ .../templates/pod-disruption-budget.yaml | 13 - .../templates/podsecuritypolicy.yaml | 211 --------- .../charts/aws-calico/templates/rbac.yaml | 214 --------- .../templates/service-accounts.yaml | 18 - .../charts/aws-calico/templates/service.yaml | 15 - .../bootstrap/charts/aws-calico/values.yaml | 54 --- .../charts/aws-limits-exporter/.helmignore | 23 - .../charts/aws-limits-exporter/Chart.yaml | 6 - .../templates/_helpers.tpl | 63 --- .../templates/deployment.yaml | 67 --- .../templates/secrets.yaml | 8 - .../templates/service.yaml | 16 - .../templates/serviceaccount.yaml | 12 - .../templates/servicemonitor.yaml | 19 - .../charts/aws-limits-exporter/values.yaml | 65 --- .../aws-node-termination-handler/.helmignore | 22 - .../aws-node-termination-handler/Chart.yaml | 27 -- .../aws-node-termination-handler/README.md | 96 ---- .../templates/_helpers.tpl | 57 --- .../templates/clusterrole.yaml | 37 -- .../templates/clusterrolebinding.yaml | 12 - .../templates/daemonset.yaml | 141 ------ .../templates/psp.yaml | 57 --- .../templates/serviceaccount.yaml | 13 - 
.../aws-node-termination-handler/values.yaml | 102 ----- .../bootstrap/charts/aws-ui-view/.helmignore | 22 - .../bootstrap/charts/aws-ui-view/Chart.yaml | 5 - .../charts/aws-ui-view/templates/_helpers.tpl | 47 -- .../aws-ui-view/templates/clusterrole.yaml | 35 -- .../templates/clusterrolebinding.yaml | 12 - .../bootstrap/charts/aws-ui-view/values.yaml | 3 - .../bootstrap/charts/aws-vpc-cni/.helmignore | 22 - .../bootstrap/charts/aws-vpc-cni/Chart.yaml | 18 - .../bootstrap/charts/aws-vpc-cni/README.md | 94 ---- .../charts/aws-vpc-cni/templates/_helpers.tpl | 57 --- .../aws-vpc-cni/templates/clusterrole.yaml | 25 - .../templates/clusterrolebinding.yaml | 14 - .../aws-vpc-cni/templates/configmap.yaml | 10 - .../templates/customresourcedefinition.yaml | 19 - .../aws-vpc-cni/templates/daemonset.yaml | 138 ------ .../aws-vpc-cni/templates/eniconfig.yaml | 17 - .../aws-vpc-cni/templates/serviceaccount.yaml | 12 - .../bootstrap/charts/aws-vpc-cni/values.yaml | 161 ------- .../charts/iam-eks-user-mapper/.helmignore | 23 - .../charts/iam-eks-user-mapper/Chart.yaml | 6 - .../templates/_helpers.tpl | 63 --- .../templates/deployment.yaml | 65 --- .../iam-eks-user-mapper/templates/rbac.yaml | 24 - .../iam-eks-user-mapper/templates/secret.yaml | 10 - .../templates/serviceaccount.yaml | 13 - .../charts/iam-eks-user-mapper/values.yaml | 65 --- lib/aws-ec2/bootstrap/documentdb.tf | 14 +- lib/aws-ec2/bootstrap/ec2-sec-group.tf | 16 + lib/aws-ec2/bootstrap/{ec2.j2.tf => ec2.tf} | 17 +- lib/aws-ec2/bootstrap/elasticcache.tf | 14 +- lib/aws-ec2/bootstrap/rds.tf | 24 +- lib/aws-ec2/bootstrap/s3-qovery-buckets.tf | 29 +- .../aws/kubernetes/ec2_helm_charts.rs | 433 ++++++++++++++++++ .../{helm_charts.rs => eks_helm_charts.rs} | 10 +- src/cloud_provider/aws/kubernetes/mod.rs | 159 ++++--- .../digitalocean/kubernetes/mod.rs | 32 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 30 -- src/errors/io.rs | 2 + src/errors/mod.rs | 28 ++ tests/aws/aws_kubernetes_ec2.rs | 2 - 73 files changed, 613 
insertions(+), 3200 deletions(-) delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/README.md delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml delete mode 100644 
lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml 
delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml delete mode 100644 lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml rename lib/aws-ec2/bootstrap/{ec2.j2.tf => ec2.tf} (66%) create mode 100644 src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs rename src/cloud_provider/aws/kubernetes/{helm_charts.rs => eks_helm_charts.rs} (99%) diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore deleted file mode 100644 index 5ae7e8be..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -crds/kustomization.yaml diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml deleted file mode 100644 index 40ab5de7..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -appVersion: 3.13.4 -description: A Helm chart for installing Calico on AWS -icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png -name: aws-calico -version: 0.3.1 diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/README.md b/lib/aws-ec2/bootstrap/charts/aws-calico/README.md deleted file mode 100644 index 9abbca69..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Calico on AWS - -This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html - -## Prerequisites - -- Kubernetes 1.11+ running on AWS - -## Installing the Chart - -First add the EKS repository to Helm: - -```shell -helm repo add eks https://aws.github.io/eks-charts -``` - -Install the Calico CRDs: - -```shell -kubectl apply -k github.com/aws/eks-charts/tree/master/stable/aws-calico/crds -``` - -To install the chart with the release name `aws-calico` and default configuration: - -```shell -$ helm install --name aws-calico --namespace kube-system eks/aws-calico -``` - -To install into an EKS cluster where the CNI is already installed, you can run: - -```shell -helm upgrade --install --recreate-pods --force aws-calico --namespace kube-system eks/aws-calico -``` - -If you receive an error similar to `Error: release aws-calico failed: "aws-calico" already exists`, simply rerun the above command. - -## Configuration - -The following table lists the configurable parameters for this chart and their default values. 
- -| Parameter | Description | Default | -|----------------------------------------|---------------------------------------------------------|---------------------------------| -| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | -| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | -| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | -| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | -| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | -| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | -| `calico.node.logseverity` | Calico Node Log Severity | `Info` | -| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | -| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.tag` | Calico version | `v3.8.1` | -| `fullnameOverride` | Override the fullname of the chart | `calico` | -| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` | -| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | -| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing 
the values for the above parameters: - -```shell -$ helm install --name aws-calico --namespace kube-system eks/aws-calico --values values.yaml -``` diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml deleted file mode 100644 index 73fe142f..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/crds/crds.yaml +++ /dev/null @@ -1,214 +0,0 @@ -# Create all the CustomResourceDefinitions needed for -# Calico policy-only mode. - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - 
group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced 
- group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networksets.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - versions: - - name: v1 - served: true - storage: true - names: - kind: NetworkSet - plural: networksets - singular: networkset \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl deleted file mode 100644 index 0a18027c..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/_helpers.tpl +++ /dev/null @@ -1,55 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-calico.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "aws-calico.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "aws-calico.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "aws-calico.labels" -}} -helm.sh/chart: {{ include "aws-calico.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "aws-calico.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "aws-calico.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml deleted file mode 100644 index 9a3cfaa5..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/config-map.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" - labels: -{{ include "aws-calico.labels" . 
| indent 4 }} -data: - ladder: |- - { - "coresToReplicas": [], - "nodesToReplicas": - [ - [1, 1], - [10, 2], - [100, 3], - [250, 4], - [500, 5], - [1000, 6], - [1500, 7], - [2000, 8] - ] - } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml deleted file mode 100644 index ce553146..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/daemon-set.yaml +++ /dev/null @@ -1,142 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: "{{ include "aws-calico.fullname" . }}-node" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" -{{ include "aws-calico.labels" . | indent 4 }} -spec: - selector: - matchLabels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" - spec: - priorityClassName: system-node-critical - nodeSelector: - {{- toYaml .Values.calico.node.nodeSelector | nindent 8 }} - hostNetwork: true - serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: "{{ .Values.calico.node.image }}:{{ .Values.calico.tag }}" - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Use eni not cali for interface prefix - - name: FELIX_INTERFACEPREFIX - value: "eni" - # Enable felix info logging. 
- - name: FELIX_LOGSEVERITYSCREEN - value: "{{ .Values.calico.node.logseverity }}" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,ecs" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - - name: FELIX_TYPHAK8SSERVICENAME - value: "calico-typha" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # This will make Felix honor AWS VPC CNI's mangle table - # rules. - - name: FELIX_IPTABLESMANGLEALLOWACTION - value: Return - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - - name: FELIX_LOGSEVERITYSYS - value: "none" - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "true" - - name: NO_DEFAULT_POOLS - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # No IP address needed. - - name: IP - value: "" - - name: FELIX_HEALTHENABLED - value: "true" - {{- if .Values.calico.node.extraEnv }} - {{- toYaml .Values.calico.node.extraEnv | nindent 12 }} - {{- end }} - securityContext: - privileged: true - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -felix-ready - periodSeconds: 10 - resources: - {{- toYaml .Values.calico.node.resources | nindent 12 }} - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - volumes: - # Used to ensure proper kmods are installed. 
- - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml deleted file mode 100644 index a879a8d2..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/deployment.yaml +++ /dev/null @@ -1,128 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" -{{ include "aws-calico.labels" . | indent 4 }} -spec: - revisionHistoryLimit: 2 - selector: - matchLabels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" - template: - metadata: - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - priorityClassName: system-cluster-critical - nodeSelector: - {{- toYaml .Values.calico.typha.nodeSelector | nindent 8 }} - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - {{- if .Values.calico.typha.tolerations }} -{{ toYaml .Values.calico.typha.tolerations | indent 10 }} - {{- end }} - hostNetwork: true - serviceAccountName: "{{ include "aws-calico.serviceAccountName" . 
}}-node" - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: "{{ .Values.calico.typha.image }}:{{ .Values.calico.tag }}" - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Use eni not cali for interface prefix - - name: FELIX_INTERFACEPREFIX - value: "eni" - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - - name: TYPHA_LOGSEVERITYSCREEN - value: "{{ .Values.calico.typha.logseverity }}" - - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "true" - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "9093" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_MAXCONNECTIONSLOWERLIMIT - value: "1" - - name: TYPHA_HEALTHENABLED - value: "true" - # This will make Felix honor AWS VPC CNI's mangle table - # rules. - - name: FELIX_IPTABLESMANGLEALLOWACTION - value: Return - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 - resources: - {{- toYaml .Values.calico.typha.resources | nindent 12 }} - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" -{{ include "aws-calico.labels" . | indent 4 }} -spec: - selector: - matchLabels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" - replicas: 1 - template: - metadata: - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . 
}}-typha-autoscaler" - spec: - priorityClassName: system-cluster-critical - nodeSelector: - {{- toYaml .Values.calico.typha_autoscaler.nodeSelector | nindent 8 }} - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - {{- if .Values.calico.typha_autoscaler.tolerations }} -{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }} - {{- end }} - containers: - - image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}" - name: autoscaler - command: - - /cluster-proportional-autoscaler - - --namespace={{ .Release.Namespace }} - - --configmap={{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler - - --target=deployment/{{ include "aws-calico.fullname" . }}-typha - - --logtostderr=true - - --v=2 - resources: - {{- toYaml .Values.calico.typha_autoscaler.resources | nindent 12 }} - serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml deleted file mode 100644 index 8635b315..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/pod-disruption-budget.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" -{{ include "aws-calico.labels" . | indent 4 }} -spec: - maxUnavailable: 1 - selector: - matchLabels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . 
}}-typha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml deleted file mode 100644 index c946ee71..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,211 +0,0 @@ -{{- if .Values.podSecurityPolicy.create -}} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "aws-calico.fullname" . }}-node - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node -{{ include "aws-calico.labels" . | indent 4 }} -spec: - privileged: true - allowPrivilegeEscalation: true - requiredDropCapabilities: - - ALL - hostNetwork: true - hostIPC: false - hostPID: false - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - - 'hostPath' - allowedHostPaths: - - pathPrefix: "/lib/modules" - readOnly: false - - pathPrefix: "/var/run/calico" - readOnly: false - - pathPrefix: "/var/lib/calico" - readOnly: false - - pathPrefix: "/run/xtables.lock" - readOnly: false - runAsUser: - rule: 'RunAsAny' - runAsGroup: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'RunAsAny' - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "aws-calico.fullname" . }}-typha - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha -{{ include "aws-calico.labels" . 
| indent 4 }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - hostNetwork: true - hostPorts: - - max: 5473 - min: 5473 - hostIPC: false - hostPID: false - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'RunAsAny' - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler -{{ include "aws-calico.labels" . | indent 4 }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - hostNetwork: false - hostIPC: false - hostPID: false - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'RunAsAny' - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "aws-calico.fullname" . }}-node-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node -{{ include "aws-calico.labels" . | indent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ include "aws-calico.fullname" . }}-node ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "aws-calico.fullname" . }}-typha-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha -{{ include "aws-calico.labels" . 
| indent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ include "aws-calico.fullname" . }}-typha ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler -{{ include "aws-calico.labels" . | indent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "aws-calico.fullname" . }}-node-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node -{{ include "aws-calico.labels" . | indent 4 }} -roleRef: - kind: Role - name: {{ include "aws-calico.fullname" . }}-node-psp - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - name: {{ include "aws-calico.serviceAccountName" . }}-node - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "aws-calico.fullname" . }}-typha-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha -{{ include "aws-calico.labels" . | indent 4 }} -roleRef: - kind: Role - name: {{ include "aws-calico.fullname" . }}-typha-psp - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - name: {{ include "aws-calico.serviceAccountName" . }}-node - namespace: {{ .Release.Namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp - labels: - app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler -{{ include "aws-calico.labels" . 
| indent 4 }} -roleRef: - kind: Role - name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - name: {{ include "aws-calico.serviceAccountName" . }}-typha-cpha - namespace: {{ .Release.Namespace }} -{{- end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml deleted file mode 100644 index 7caa7fa4..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/rbac.yaml +++ /dev/null @@ -1,214 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: "{{ include "aws-calico.fullname" . }}-node" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" -{{ include "aws-calico.labels" . | indent 4 }} -rules: - # The CNI plugin needs to get pods, nodes, namespaces, and configmaps. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. 
- - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only requried for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: "{{ include "aws-calico.fullname" . }}-node" - labels: -{{ include "aws-calico.labels" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "{{ include "aws-calico.fullname" . 
}}-node" -subjects: - - kind: ServiceAccount - name: "{{ include "aws-calico.serviceAccountName" . }}-node" - namespace: {{ .Release.Namespace }} - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" - labels: -{{ include "aws-calico.labels" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" -subjects: - - kind: ServiceAccount - name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" - namespace: {{ .Release.Namespace }} - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" - labels: -{{ include "aws-calico.labels" . | indent 4 }} -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" - labels: -{{ include "aws-calico.labels" . | indent 4 }} -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get"] - - apiGroups: ["extensions", "apps"] - resources: ["deployments/scale"] - verbs: ["get", "update"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" - labels: -{{ include "aws-calico.labels" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ include "aws-calico.fullname" . }}-typha-cpha" -subjects: - - kind: ServiceAccount - name: "{{ include "aws-calico.serviceAccountName" . 
}}-typha-cpha" - namespace: "{{ .Release.Namespace }}" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml deleted file mode 100644 index 21409395..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service-accounts.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Create the ServiceAccount and roles necessary for Calico. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: "{{ include "aws-calico.serviceAccountName" . }}-node" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" -{{ include "aws-calico.labels" . | indent 4 }} - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-cpha" -{{ include "aws-calico.labels" . | indent 4 }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml deleted file mode 100644 index 4edb632d..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "{{ include "aws-calico.fullname" . }}-typha" - labels: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" -{{ include "aws-calico.labels" . | indent 4 }} -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: "{{ include "aws-calico.fullname" . }}-typha" - selector: - app.kubernetes.io/name: "{{ include "aws-calico.fullname" . 
}}-typha" diff --git a/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml deleted file mode 100644 index c192e92e..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-calico/values.yaml +++ /dev/null @@ -1,54 +0,0 @@ -fullnameOverride: calico - -serviceAccount: - create: true - -podSecurityPolicy: - create: false - -calico: - tag: v3.13.4 - - typha: - logseverity: Info #Debug, Info, Warning, Error, Fatal - image: quay.io/calico/typha - resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "96Mi" - cpu: "100m" - tolerations: [] - nodeSelector: - beta.kubernetes.io/os: linux - node: - logseverity: Info #Debug, Info, Warning, Error, Fatal - image: quay.io/calico/node - resources: - requests: - memory: "32Mi" - cpu: "20m" - limits: - memory: "64Mi" - cpu: "100m" - extraEnv: [] - # - name: SOME_VAR - # value: 'some value' - nodeSelector: - beta.kubernetes.io/os: linux - typha_autoscaler: - resources: - requests: - memory: "16Mi" - cpu: "10m" - limits: - memory: "32Mi" - cpu: "10m" - tolerations: [] - nodeSelector: - beta.kubernetes.io/os: linux - -autoscaler: - tag: "1.7.1" - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64 diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml deleted file mode 100644 index c6ebc029..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: aws-limits-exporter -description: A Helm chart for Kubernetes -type: application -version: 0.1.0 -appVersion: 0.3.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl deleted file mode 100644 index bf005806..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-limits-exporter.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "aws-limits-exporter.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "aws-limits-exporter.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "aws-limits-exporter.labels" -}} -helm.sh/chart: {{ include "aws-limits-exporter.chart" . }} -{{ include "aws-limits-exporter.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "aws-limits-exporter.selectorLabels" -}} -app.kubernetes.io/name: {{ include "aws-limits-exporter.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "aws-limits-exporter.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "aws-limits-exporter.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml deleted file mode 100644 index eddcd329..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/deployment.yaml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "aws-limits-exporter.fullname" . }} - labels: - {{- include "aws-limits-exporter.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "aws-limits-exporter.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "aws-limits-exporter.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - serviceAccountName: {{ include "aws-limits-exporter.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http-metrics - containerPort: 8080 - protocol: TCP - env: - - name: AWS_ACCESS_KEY - valueFrom: - secretKeyRef: - name: aws-limits-exporter - key: awsAccessKey - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: aws-limits-exporter - key: awsSecretKey - livenessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 20 - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml deleted file mode 100644 index b7bd2cd9..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/secrets.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: aws-limits-exporter -type: Opaque -data: - awsAccessKey: {{ .Values.awsCredentials.awsAccessKey | b64enc}} - awsSecretKey: {{ .Values.awsCredentials.awsSecretKey | b64enc}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml deleted file mode 100644 index 0f89dc31..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "aws-limits-exporter.fullname" . }} - labels: - app: aws-limits-exporter - {{- include "aws-limits-exporter.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http-metrics - protocol: TCP - name: http-metrics - selector: - {{- include "aws-limits-exporter.selectorLabels" . | nindent 4 }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml deleted file mode 100644 index 2bc9d0ee..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "aws-limits-exporter.serviceAccountName" . }} - labels: - {{- include "aws-limits-exporter.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml deleted file mode 100644 index 4b1423fb..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/templates/servicemonitor.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.prometheusScraping.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "aws-limits-exporter.fullname" . }} - labels: - {{- include "aws-limits-exporter.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - app: aws-limits-exporter - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} - endpoints: - - port: http-metrics - path: /metrics - interval: "{{ .Values.prometheusScraping.scrapInterval }}" -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml deleted file mode 100644 index 3726aefd..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-limits-exporter/values.yaml +++ /dev/null @@ -1,65 +0,0 @@ -replicaCount: 1 - -image: - repository: danielfm/aws-limits-exporter - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart version. - tag: "0.3.0" - -imagePullSecrets: [] -nameOverride: "aws-limits-exporter" -fullnameOverride: "aws-limits-exporter" - -awsCredentials: - awsAccessKey: "" - awsSecretKey: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - -labels: - app.kubernetes.io/name: aws-limits-exporter - -selectorLabels: - app.kubernetes.io/name: aws-limits-exporter - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 8080 - -resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -prometheusScraping: - enabled: true - scrapInterval: "60s" diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore deleted file mode 100644 index 50af0317..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml deleted file mode 100644 index 47ed2baa..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/Chart.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -appVersion: 1.5.0 -description: A Helm chart for the AWS Node Termination Handler -home: https://github.com/aws/eks-charts -icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png -keywords: -- eks -- ec2 -- node-termination -- spot -maintainers: -- email: nckturner@users.noreply.github.com - name: Nicholas Turner - url: https://github.com/nckturner -- email: stefanprodan@users.noreply.github.com - name: Stefan Prodan - url: https://github.com/stefanprodan -- email: jillmon@users.noreply.github.com - name: Jillian Montalvo - url: https://github.com/jillmon -- email: mattrandallbecker@users.noreply.github.com - name: Matthew Becker - url: https://github.com/mattrandallbecker -name: aws-node-termination-handler -sources: -- https://github.com/aws/eks-charts -version: 0.8.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md deleted file mode 100644 index f1847304..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# AWS Node Termination Handler - -AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler. 
- -## Prerequisites - -* Kubernetes >= 1.11 - -## Installing the Chart - -Add the EKS repository to Helm: -```sh -helm repo add eks https://aws.github.io/eks-charts -``` -Install AWS Node Termination Handler: -To install the chart with the release name aws-node-termination-handler and default configuration: - -```sh -helm install --name aws-node-termination-handler \ - --namespace kube-system eks/aws-node-termination-handler -``` - -To install into an EKS cluster where the Node Termination Handler is already installed, you can run: - -```sh -helm upgrade --install --recreate-pods --force \ - aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler -``` - -If you receive an error similar to `Error: release aws-node-termination-handler -failed: "aws-node-termination-handler" already exists`, simply rerun -the above command. - -The [configuration](#configuration) section lists the parameters that can be configured during installation. - -## Uninstalling the Chart - -To uninstall/delete the `aws-node-termination-handler` deployment: - -```sh -helm delete --purge aws-node-termination-handler -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Configuration - -The following tables lists the configurable parameters of the chart and their default values. - -Parameter | Description | Default ---- | --- | --- -`image.repository` | image repository | `amazon/aws-node-termination-handler` -`image.tag` | image tag | `` -`image.pullPolicy` | image pull policy | `IfNotPresent` -`image.pullSecrets` | image pull secrets (for private docker registries) | `[]` -`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false` -`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used. | `30` -`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` -`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` -`ignoreDaemonsSets` | Causes kubectl to skip daemon set managed pods | `true` -`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80` -`webhookURL` | Posts event data to URL upon instance interruption action | `` -`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | `` -`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}` -`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}` -`dryRun` | If true, only log if a node would be drained | `false` -`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false` -`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true` -`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3` -`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. | `false` -`taintNode` | If true, nodes will be tainted when an interruption event occurs. 
Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false` -`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false` -`affinity` | node/pod affinities | None -`podAnnotations` | annotations to add to each pod | `{}` -`priorityClassName` | Name of the priorityClass | `system-node-critical` -`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi` -`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet` -`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. | `{}` -`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]` -`rbac.create` | if `true`, create and use RBAC resources | `true` -`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` -`serviceAccount.create` | If `true`, create a new service account | `true` -`serviceAccount.name` | Service account to be used | None -`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` -`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime` -`securityContext.runAsUserID` | User ID to run the container | `1000` -`securityContext.runAsGroupID` | Group ID to run the container | `1000` -`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os` -`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch` -`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false` -`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. 
| `9092` - -## Metrics endpoint consideration -If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace -since it's listening on all interfaces. -Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl deleted file mode 100644 index 902844a7..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl +++ /dev/null @@ -1,57 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-node-termination-handler.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "aws-node-termination-handler.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "aws-node-termination-handler.labels" -}} -app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} -helm.sh/chart: {{ include "aws-node-termination-handler.chart" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -k8s-app: aws-node-termination-handler -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "aws-node-termination-handler.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "aws-node-termination-handler.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "aws-node-termination-handler.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml deleted file mode 100644 index dc800866..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml +++ /dev/null @@ -1,37 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "aws-node-termination-handler.fullname" . 
}} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - list -- apiGroups: - - "" - resources: - - pods/eviction - verbs: - - create -- apiGroups: - - extensions - resources: - - daemonsets - verbs: - - get -- apiGroups: - - apps - resources: - - daemonsets - verbs: - - get diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml deleted file mode 100644 index b5c25327..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "aws-node-termination-handler.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ include "aws-node-termination-handler.fullname" . }} - apiGroup: rbac.authorization.k8s.io diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml deleted file mode 100644 index fb220022..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ include "aws-node-termination-handler.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} -spec: - updateStrategy: -{{ toYaml .Values.updateStrategy | indent 4 }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - {{- if .Values.podAnnotations }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - labels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - k8s-app: aws-node-termination-handler - spec: - volumes: - - name: "uptime" - hostPath: - path: "{{ .Values.procUptimeFile }}" - priorityClassName: "{{ .Values.priorityClassName }}" - affinity: - nodeAffinity: - # NOTE(jaypipes): Change when we complete - # https://github.com/aws/aws-node-termination-handler/issues/8 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }} - operator: In - values: - - linux - - key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }} - operator: In - values: - - amd64 - - arm - - arm64 - serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }} - hostNetwork: true - dnsPolicy: {{ .Values.dnsPolicy }} - containers: - - name: {{ include "aws-node-termination-handler.name" . 
}} - image: {{ .Values.image.repository}}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - securityContext: - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: {{ .Values.securityContext.runAsUserID }} - runAsGroup: {{ .Values.securityContext.runAsGroupID }} - allowPrivilegeEscalation: false - volumeMounts: - - name: "uptime" - mountPath: "/proc/uptime" - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SPOT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: DELETE_LOCAL_DATA - value: {{ .Values.deleteLocalData | quote }} - - name: IGNORE_DAEMON_SETS - value: {{ .Values.ignoreDaemonSets | quote }} - - name: GRACE_PERIOD - value: {{ .Values.gracePeriod | quote }} - - name: POD_TERMINATION_GRACE_PERIOD - value: {{ .Values.podTerminationGracePeriod | quote }} - - name: INSTANCE_METADATA_URL - value: {{ .Values.instanceMetadataURL | quote }} - - name: NODE_TERMINATION_GRACE_PERIOD - value: {{ .Values.nodeTerminationGracePeriod | quote }} - - name: WEBHOOK_URL - value: {{ .Values.webhookURL | quote }} - - name: WEBHOOK_HEADERS - value: {{ .Values.webhookHeaders | quote }} - - name: WEBHOOK_TEMPLATE - value: {{ .Values.webhookTemplate | quote }} - - name: DRY_RUN - value: {{ .Values.dryRun | quote }} - - name: ENABLE_SPOT_INTERRUPTION_DRAINING - value: {{ .Values.enableSpotInterruptionDraining | quote }} - - name: ENABLE_SCHEDULED_EVENT_DRAINING - value: {{ .Values.enableScheduledEventDraining | quote }} - - name: METADATA_TRIES - value: {{ .Values.metadataTries | quote }} - - name: CORDON_ONLY - value: {{ .Values.cordonOnly | quote }} - - name: TAINT_NODE - value: {{ .Values.taintNode | quote }} - - name: JSON_LOGGING - value: {{ .Values.jsonLogging | quote }} - - name: WEBHOOK_PROXY - value: {{ 
.Values.webhookProxy | quote }} - - name: ENABLE_PROMETHEUS_SERVER - value: {{ .Values.enablePrometheusServer | quote }} - - name: PROMETHEUS_SERVER_PORT - value: {{ .Values.prometheusServerPort | quote }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml deleted file mode 100644 index 0eda5002..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "aws-node-termination-handler.fullname" . }} - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' -spec: - privileged: false - hostIPC: false - hostNetwork: true - hostPID: false - readOnlyRootFilesystem: false - allowPrivilegeEscalation: false - allowedCapabilities: - - '*' - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "aws-node-termination-handler.fullname" . }}-psp - labels: -{{ include "aws-node-termination-handler.labels" . 
| indent 4 }} -rules: - - apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "aws-node-termination-handler.fullname" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "aws-node-termination-handler.fullname" . }}-psp - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "aws-node-termination-handler.fullname" . }}-psp -subjects: - - kind: ServiceAccount - name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml deleted file mode 100644 index 55f2d766..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml deleted file mode 100644 index 469a51e4..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-node-termination-handler/values.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Default values for aws-node-termination-handler. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -image: - repository: amazon/aws-node-termination-handler - tag: v1.5.0 - pullPolicy: IfNotPresent - pullSecrets: [] - -securityContext: - runAsUserID: 1000 - runAsGroupID: 1000 - -nameOverride: "" -fullnameOverride: "" - -priorityClassName: system-node-critical - -podAnnotations: {} - -resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" - -## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is receieved -enableSpotInterruptionDraining: "" - -## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event -enableScheduledEventDraining: "" - -taintNode: false - -## dryRun tells node-termination-handler to only log calls to kubernetes control plane -dryRun: false - -# deleteLocalData tells kubectl to continue even if there are pods using -# emptyDir (local data that will be deleted when the node is drained). -deleteLocalData: "" - -# ignoreDaemonSets causes kubectl to skip Daemon Set managed pods. -ignoreDaemonSets: "" - -# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully. -# If negative, the default value specified in the pod will be used. -gracePeriod: "" -podTerminationGracePeriod: "" - -# nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. -nodeTerminationGracePeriod: "" - -# webhookURL if specified, posts event data to URL upon instance interruption action. -webhookURL: "" - -# webhookProxy if specified, uses this HTTP(S) proxy configuration. -webhookProxy: "" - -# webhookHeaders if specified, replaces the default webhook headers. 
-webhookHeaders: "" - -# webhookTemplate if specified, replaces the default webhook message template. -webhookTemplate: "" - -# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80) -instanceMetadataURL: "" - -# (TESTING USE): Mount path for uptime file -procUptimeFile: "/proc/uptime" - -# nodeSelector tells the daemonset where to place the node-termination-handler -# pods. By default, this value is empty and every node will receive a pod. -nodeSelector: {} - -nodeSelectorTermsOs: "" -nodeSelectorTermsArch: "" - -enablePrometheusServer: false -prometheusServerPort: "9092" - -tolerations: - - operator: "Exists" - -affinity: {} - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. If namenot set and create is true, - # a name is generated using fullname template - name: - annotations: {} - # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME - -rbac: - # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created - pspEnabled: true - -dnsPolicy: "ClusterFirstWithHostNet" diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore deleted file mode 100644 index 50af0317..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml deleted file mode 100644 index 6385e4d6..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1 -description: A Helm chart for the AWS UI View -name: aws-ui-view -version: 1.0.0 diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl deleted file mode 100644 index 76e96336..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/_helpers.tpl +++ /dev/null @@ -1,47 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-ui-view.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "aws-ui-view.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "aws-ui-view.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "aws-ui-view.labels" -}} -app.kubernetes.io/name: {{ include "aws-ui-view.name" . }} -helm.sh/chart: {{ include "aws-ui-view.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -k8s-app: aws-node -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml deleted file mode 100644 index dff89b24..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrole.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - name: {{ include "aws-ui-view.fullname" . }} -rules: - - apiGroups: - - '*' - resources: - - nodes - - namespaces - - pods - - events - verbs: - - get - - list - - watch - - apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - - replicasets - verbs: - - get - - list - - apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml deleted file mode 100644 index 16802963..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "aws-ui-view.fullname" . 
}} -subjects: - - kind: Group - name: Admins - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: {{ include "aws-ui-view.fullname" . }} - apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml deleted file mode 100644 index 299bcc74..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-ui-view/values.yaml +++ /dev/null @@ -1,3 +0,0 @@ -nameOverride: aws-ui-view - -fullnameOverride: "aws-ui-view" \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore deleted file mode 100644 index 50af0317..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml deleted file mode 100644 index 2f572eb2..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -appVersion: v1.7.5 -description: A Helm chart for the AWS VPC CNI -home: https://github.com/aws/amazon-vpc-cni-k8s -icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png -keywords: -- eks -- cni -- networking -- vpc -maintainers: -- email: jayanthvn@users.noreply.github.com - name: Jayanth Varavani - url: https://github.com/jayanthvn -name: aws-vpc-cni -sources: -- https://github.com/aws/amazon-vpc-cni-k8s -version: 1.1.3 diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md deleted file mode 100644 index 768f629d..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# AWS VPC CNI - -This chart installs the AWS CNI Daemonset: https://github.com/aws/amazon-vpc-cni-k8s - -## Prerequisites - -- Kubernetes 1.11+ running on AWS - -## Installing the Chart - -First add the EKS repository to Helm: - -```shell -helm repo add eks https://aws.github.io/eks-charts -``` - -To install the chart with the release name `aws-vpc-cni` and default configuration: - -```shell -$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni -``` - -To install into an EKS cluster where the CNI is already installed, see [this section below](#adopting-the-existing-aws-node-resources-in-an-eks-cluster) - -## Configuration - -The following table lists the configurable parameters for this chart and their default values. 
- -| Parameter | Description | Default | -| ------------------------|---------------------------------------------------------|-------------------------------------| -| `affinity` | Map of node/pod affinities | `{}` | -| `cniConfig.enabled` | Enable overriding the default 10-aws.conflist file | `false` | -| `cniConfig.fileContents`| The contents of the custom cni config file | `nil` | -| `eniConfig.create` | Specifies whether to create ENIConfig resource(s) | `false` | -| `eniConfig.region` | Region to use when generating ENIConfig resource names | `us-west-2` | -| `eniConfig.subnets` | A map of AZ identifiers to config per AZ | `nil` | -| `eniConfig.subnets.id` | The ID of the subnet within the AZ which will be used in the ENIConfig | `nil` | -| `eniConfig.subnets.securityGroups` | The IDs of the security groups which will be used in the ENIConfig | `nil` | -| `env` | List of environment variables. See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) | -| `fullnameOverride` | Override the fullname of the chart | `aws-node` | -| `image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | -| `image.tag` | Image tag | `v1.7.5` | -| `image.pullPolicy` | Container pull policy | `IfNotPresent` | -| `image.override` | A custom docker image to use | `nil` | -| `imagePullSecrets` | Docker registry pull secret | `[]` | -| `init.image.region` | ECR repository region to use. Should match your cluster | `us-west-2` | -| `init.image.tag` | Image tag | `v1.7.5` | -| `init.image.pullPolicy` | Container pull policy | `IfNotPresent` | -| `init.image.override` | A custom docker image to use | `nil` | -| `init.env` | List of init container environment variables. 
See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) | -| `init.securityContext` | Init container Security context | `privileged: true` | -| `originalMatchLabels` | Use the original daemonset matchLabels | `false` | -| `nameOverride` | Override the name of the chart | `aws-node` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `podSecurityContext` | Pod Security Context | `{}` | -| `podAnnotations` | annotations to add to each pod | `{}` | -| `priorityClassName` | Name of the priorityClass | `system-node-critical` | -| `resources` | Resources for the pods | `requests.cpu: 10m` | -| `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` | -| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` | -| `livenessProbe` | Livenness probe settings for daemonset | (see `values.yaml`) | -| `readinessProbe` | Readiness probe settings for daemonset | (see `values.yaml`) | -| `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` | -| `tolerations` | Optional deployment tolerations | `[]` | -| `updateStrategy` | Optional update strategy | `type: RollingUpdate` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: - -```shell -$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni --values values.yaml -``` - -## Adopting the existing aws-node resources in an EKS cluster - -If you do not want to delete the existing aws-node resources in your cluster that run the aws-vpc-cni and then install this helm chart, you can adopt the resources into a release instead. 
This process is highlighted in this [PR comment](https://github.com/aws/eks-charts/issues/57#issuecomment-628403245). Once you have annotated and labeled all the resources this chart specifies, enable the `originalMatchLabels` flag, and also set `crd.create` to false on the helm release and run an update. If you have been careful this should not diff and leave all the resources unmodified and now under management of helm. - -Here is an example script to modify the existing resources: - -WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release. -``` -#!/usr/bin/env bash - -set -euo pipefail - -# don't import the crd. Helm cant manage the lifecycle of it anyway. -for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do - echo "setting annotations and labels on $kind/aws-node" - kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE - kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system - kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm -done -``` diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl deleted file mode 100644 index 230aed77..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/_helpers.tpl +++ /dev/null @@ -1,57 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "aws-vpc-cni.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "aws-vpc-cni.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "aws-vpc-cni.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "aws-vpc-cni.labels" -}} -app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} -helm.sh/chart: {{ include "aws-vpc-cni.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -k8s-app: aws-node -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "aws-vpc-cni.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "aws-vpc-cni.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml deleted file mode 100644 index 0635b5ed..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "aws-vpc-cni.fullname" . }} - labels: -{{ include "aws-vpc-cni.labels" . 
| indent 4 }} -rules: - - apiGroups: - - crd.k8s.amazonaws.com - resources: - - eniconfigs - verbs: ["list", "watch", "get"] - - apiGroups: [""] - resources: - - pods - - namespaces - verbs: ["list", "watch", "get"] - - apiGroups: [""] - resources: - - nodes - verbs: ["list", "watch", "get", "update"] - - apiGroups: ["extensions"] - resources: - - '*' - verbs: ["list", "watch"] diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml deleted file mode 100644 index 5cadd1b1..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "aws-vpc-cni.fullname" . }} - labels: -{{ include "aws-vpc-cni.labels" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "aws-vpc-cni.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "aws-vpc-cni.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml deleted file mode 100644 index 401a8c19..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/configmap.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.cniConfig.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "aws-vpc-cni.fullname" . }} - labels: -{{ include "aws-vpc-cni.labels" . 
| indent 4 }} -data: - 10-aws.conflist: {{ .Values.cniConfig.fileContents | b64enc }} -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml deleted file mode 100644 index bdd29e7a..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.crd.create -}} -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: eniconfigs.crd.k8s.amazonaws.com - labels: -{{ include "aws-vpc-cni.labels" . | indent 4 }} -spec: - scope: Cluster - group: crd.k8s.amazonaws.com - versions: - - name: v1alpha1 - served: true - storage: true - names: - plural: eniconfigs - singular: eniconfig - kind: ENIConfig -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml deleted file mode 100644 index 10388ef0..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml +++ /dev/null @@ -1,138 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ include "aws-vpc-cni.fullname" . }} - labels: -{{ include "aws-vpc-cni.labels" . | indent 4 }} -spec: - updateStrategy: -{{ toYaml .Values.updateStrategy | indent 4 }} - selector: - matchLabels: -{{- if .Values.originalMatchLabels }} - k8s-app: aws-node -{{- else }} - app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - template: - metadata: - {{- if .Values.podAnnotations }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - labels: - app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - k8s-app: aws-node - spec: - priorityClassName: "{{ .Values.priorityClassName }}" - serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }} - hostNetwork: true - initContainers: - - name: aws-vpc-cni-init - image: "{{- if .Values.init.image.override }}{{- .Values.init.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.init.image.region }}.amazonaws.com/amazon-k8s-cni-init:{{- .Values.init.image.tag }}{{- end}}" - imagePullPolicy: {{ .Values.init.image.pullPolicy }} - env: -{{- range $key, $value := .Values.init.env }} - - name: {{ $key }} - value: {{ $value | quote }} -{{- end }} - securityContext: - {{- toYaml .Values.init.securityContext | nindent 12 }} - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - terminationGracePeriodSeconds: 10 - tolerations: - - operator: Exists - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: aws-node - image: "{{- if .Values.image.override }}{{- .Values.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.image.region }}.amazonaws.com/amazon-k8s-cni:{{- .Values.image.tag }}{{- end}}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - containerPort: 61678 - name: metrics - livenessProbe: -{{ toYaml .Values.livenessProbe | indent 12 }} - readinessProbe: -{{ toYaml .Values.readinessProbe | indent 12 }} - env: -{{- range $key, $value := .Values.env }} - - name: {{ $key }} - value: {{ $value | quote }} -{{- end }} - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - resources: - {{- toYaml .Values.resources | nindent 12 }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir -{{- if .Values.cniConfig.enabled }} 
- # the dockerfile copies the baked in config to this location, lets overwrite it with ours - # the entrypoint.sh script will then copy our config to /host/etc/cni/net.d on boot - - name: cni-config - mountPath: /app/10-aws.conflist - subPath: 10-aws.conflist -{{- end }} - - mountPath: /host/var/log/aws-routed-eni - name: log-dir - - mountPath: /var/run/dockershim.sock - name: dockershim - - mountPath: /var/run/aws-node - name: run-dir - - mountPath: /run/xtables.lock - name: xtables-lock - volumes: - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d -{{- if .Values.cniConfig.enabled }} - - name: cni-config - configMap: - name: {{ include "aws-vpc-cni.fullname" . }} -{{- end }} - - name: dockershim - hostPath: - path: /var/run/dockershim.sock - - name: log-dir - hostPath: - path: /var/log/aws-routed-eni - type: DirectoryOrCreate - - name: run-dir - hostPath: - path: /var/run/aws-node - type: DirectoryOrCreate - - name: xtables-lock - hostPath: - path: /run/xtables.lock - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml deleted file mode 100644 index 6654ee60..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/eniconfig.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.eniConfig.create }} -{{- range $key, $value := (required ".Values.eniConfig.subnets must be specified" .Values.eniConfig.subnets) }} -apiVersion: crd.k8s.amazonaws.com/v1alpha1 -kind: ENIConfig -metadata: - name: {{ required ".Values.eniConfig.region must be specified" $.Values.eniConfig.region }}{{ $key }} -spec: - {{- if $value.securityGroups }} - securityGroups: - {{- range $sg := $value.securityGroups }} - - {{ $sg }} - {{- end }} - {{- end }} - subnet: {{ $value.id }} ---- -{{- end }} -{{- end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml deleted file mode 100644 index 88515669..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "aws-vpc-cni.serviceAccountName" . }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - labels: -{{ include "aws-vpc-cni.labels" . | indent 4 }} -{{- end -}} diff --git a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml deleted file mode 100644 index 84388b40..00000000 --- a/lib/aws-ec2/bootstrap/charts/aws-vpc-cni/values.yaml +++ /dev/null @@ -1,161 +0,0 @@ -# Default values for aws-vpc-cni. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -# This default name override is to maintain backwards compatability with -# existing naming -nameOverride: aws-node - -init: - image: - tag: v1.7.5 - region: us-west-2 - pullPolicy: Always - # Set to use custom image - # override: "repo/org/image:tag" - env: - DISABLE_TCP_EARLY_DEMUX: "false" - securityContext: - privileged: true - -image: - region: us-west-2 - tag: v1.7.5 - pullPolicy: Always - # Set to use custom image - # override: "repo/org/image:tag" - -# The CNI supports a number of environment variable settings -# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables -env: - ADDITIONAL_ENI_TAGS: "{}" - AWS_VPC_CNI_NODE_PORT_SUPPORT: "true" - AWS_VPC_ENI_MTU: "9001" - AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false" - AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false" - AWS_VPC_K8S_CNI_EXTERNALSNAT: "false" - AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log" - AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG - AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng" - AWS_VPC_K8S_CNI_VETHPREFIX: eni - AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log" - AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG - DISABLE_INTROSPECTION: "false" - DISABLE_METRICS: "false" - ENABLE_POD_ENI: "false" - WARM_ENI_TARGET: "1" - -# this flag enables you to use the match label that was present in the original daemonset deployed by EKS -# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release -originalMatchLabels: false - -cniConfig: - enabled: false - fileContents: "" - -imagePullSecrets: [] - -fullnameOverride: "aws-node" - -priorityClassName: system-node-critical - -podSecurityContext: {} - -podAnnotations: {} - -securityContext: - capabilities: - add: - - "NET_ADMIN" - -crd: - create: true - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: - annotations: {} - # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME - -livenessProbe: - exec: - command: - - /app/grpc-health-probe - - '-addr=:50051' - initialDelaySeconds: 60 - -readinessProbe: - exec: - command: - - /app/grpc-health-probe - - '-addr=:50051' - initialDelaySeconds: 1 - -resources: - requests: - cpu: 10m - -updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: "10%" - -nodeSelector: {} - -tolerations: [] - -affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "beta.kubernetes.io/os" - operator: In - values: - - linux - - key: "beta.kubernetes.io/arch" - operator: In - values: - - amd64 - - arm64 - - key: "eks.amazonaws.com/compute-type" - operator: NotIn - values: - - fargate - - matchExpressions: - - key: "kubernetes.io/os" - operator: In - values: - - linux - - key: "kubernetes.io/arch" - operator: In - values: - - amd64 - - arm64 - - key: "eks.amazonaws.com/compute-type" - operator: NotIn - values: - - fargate - -eniConfig: - # Specifies whether ENIConfigs should be created - create: false - region: us-west-2 - subnets: - # Key identifies the AZ - # Value contains the subnet ID and security group IDs within that AZ - # a: - # id: subnet-123 - # securityGroups: - # - sg-123 - # b: - # id: subnet-456 - # securityGroups: - # - sg-456 - # c: - # id: subnet-789 - # securityGroups: - # - sg-789 \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). 
Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml deleted file mode 100644 index e0583ff2..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: iam-eks-user-mapper -description: A Helm chart for Kubernetes -type: application -version: 0.1.0 -appVersion: 0.1.0 diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl deleted file mode 100644 index 925c198e..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "iam-eks-user-mapper.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "iam-eks-user-mapper.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "iam-eks-user-mapper.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "iam-eks-user-mapper.labels" -}} -helm.sh/chart: {{ include "iam-eks-user-mapper.chart" . }} -{{ include "iam-eks-user-mapper.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "iam-eks-user-mapper.selectorLabels" -}} -app.kubernetes.io/name: {{ include "iam-eks-user-mapper.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "iam-eks-user-mapper.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "iam-eks-user-mapper.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml deleted file mode 100644 index c4c60995..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/deployment.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "iam-eks-user-mapper.fullname" . }} - namespace: kube-system - labels: - {{- include "iam-eks-user-mapper.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "iam-eks-user-mapper.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "iam-eks-user-mapper.selectorLabels" . 
| nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "iam-eks-user-mapper.serviceAccountName" . }} - automountServiceAccountToken: true - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: AWS_REGION - value: {{ .Values.aws.region }} - - name: AWS_ACCESS_KEY_ID - value: {{ .Values.aws.accessKey }} - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: awsKey - name: {{ include "iam-eks-user-mapper.fullname" . }} - command: - - ./app - - --aws-iam-group - - {{ .Values.syncIamGroup }} - - --k8s-cap - - system:masters - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml deleted file mode 100644 index 1c82ee2c..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/rbac.yaml +++ /dev/null @@ -1,24 +0,0 @@ -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: eks-configmap-modifier-role - namespace: kube-system -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "update"] - resourceNames: ["aws-auth"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: kube-system - name: eks-configmap-modifier-rolebinding -subjects: - - kind: ServiceAccount - name: {{ include "iam-eks-user-mapper.serviceAccountName" . }} - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: eks-configmap-modifier-role \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml deleted file mode 100644 index ca159331..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/secret.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "iam-eks-user-mapper.fullname" . }} - namespace: kube-system - labels: - {{- include "iam-eks-user-mapper.labels" . 
| nindent 4 }} -type: Opaque -data: - awsKey: {{ .Values.aws.secretKey | b64enc }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml deleted file mode 100644 index 056ec349..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "iam-eks-user-mapper.serviceAccountName" . }} - namespace: kube-system - labels: - {{- include "iam-eks-user-mapper.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml b/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml deleted file mode 100644 index 82fc53dd..00000000 --- a/lib/aws-ec2/bootstrap/charts/iam-eks-user-mapper/values.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Default values for iam-eks-user-mapper. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -image: - repository: ygrene/iam-eks-user-mapper - pullPolicy: IfNotPresent - tag: "latest" - -aws: - accessKey: "" - secretKey: "" - region: "" - -syncIamGroup: "" - -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "iam-eks-user-mapper" - -labels: - app: iam-eks-user-mapper - -selectorLabels: - app: iam-eks-user-mapper - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/lib/aws-ec2/bootstrap/documentdb.tf b/lib/aws-ec2/bootstrap/documentdb.tf index 7828faf5..292fb78a 100644 --- a/lib/aws-ec2/bootstrap/documentdb.tf +++ b/lib/aws-ec2/bootstrap/documentdb.tf @@ -66,16 +66,4 @@ resource "aws_docdb_subnet_group" "documentdb" { subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) tags = local.tags_documentdb -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "documentdb_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow DocumentDB incoming access from anywhere" - from_port = 27017 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 27017 - type = "ingress" -} +} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf index f8c94814..5475a38f 100644 --- a/lib/aws-ec2/bootstrap/ec2-sec-group.tf +++ b/lib/aws-ec2/bootstrap/ec2-sec-group.tf @@ -21,4 +21,20 @@ resource "aws_security_group_rule" 
"https" { security_group_id = aws_security_group.ec2_instance.id to_port = 443 type = "ingress" +} + +# randomize inbound kubernetes port number for more security +resource "random_integer" "kubernetes_external_port" { + min = 1024 + max = 65534 +} + +resource "aws_security_group_rule" "kubernetes" { + cidr_blocks = ["0.0.0.0/0"] + description = "Kubernetes connectivity" + from_port = random_integer.kubernetes_external_port.result + protocol = "tcp" + security_group_id = aws_security_group.ec2_instance.id + to_port = random_integer.kubernetes_external_port.result + type = "ingress" } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.tf similarity index 66% rename from lib/aws-ec2/bootstrap/ec2.j2.tf rename to lib/aws-ec2/bootstrap/ec2.tf index 756276ef..dbeb3cc5 100644 --- a/lib/aws-ec2/bootstrap/ec2.j2.tf +++ b/lib/aws-ec2/bootstrap/ec2.tf @@ -36,6 +36,7 @@ resource "aws_instance" "ec2_instance" { security_groups = [aws_security_group.ec2_instance.id] user_data = local.bootstrap + user_data_replace_on_change = false tags = merge( local.tags_common, @@ -43,6 +44,10 @@ resource "aws_instance" "ec2_instance" { "Service" = "EC2" } ) + + depends_on = [ + aws_s3_bucket.kubeconfigs_bucket + ] } resource "time_static" "on_ec2_create" {} @@ -50,12 +55,16 @@ resource "time_static" "on_ec2_create" {} locals { bootstrap = <> /etc/profile @@ -63,6 +72,10 @@ while [ ! 
-f /etc/rancher/k3s/k3s.yaml ] ; do echo "kubeconfig is not yet present, sleeping" sleep 1 done -s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put /etc/rancher/k3s/k3s.yaml s3://${var.s3_bucket_kubeconfig}/${var.kubernetes_cluster_id}.yaml + +# Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap +sed -r "s/127.0.0.1:6443/$(curl -s http://169.254.169.254/latest/meta-data/public-hostname):${random_integer.kubernetes_external_port.result}/g" /etc/rancher/k3s/k3s.yaml > $KUBECONFIG_PATH +s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME +rm -f $KUBECONFIG_PATH BOOTSTRAP } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/elasticcache.tf b/lib/aws-ec2/bootstrap/elasticcache.tf index 2e703dac..b4a37b16 100644 --- a/lib/aws-ec2/bootstrap/elasticcache.tf +++ b/lib/aws-ec2/bootstrap/elasticcache.tf @@ -65,16 +65,4 @@ resource "aws_elasticache_subnet_group" "elasticache" { # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly name = "elasticache-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "elasticache_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow Redis incoming access from anywhere" - from_port = 6379 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 6379 - type = "ingress" -} +} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/rds.tf b/lib/aws-ec2/bootstrap/rds.tf index a62f5c20..06eac9d9 100644 --- a/lib/aws-ec2/bootstrap/rds.tf +++ b/lib/aws-ec2/bootstrap/rds.tf @@ -93,26 +93,4 @@ resource "aws_iam_role" "rds_enhanced_monitoring" 
{ resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" { role = aws_iam_role.rds_enhanced_monitoring.name policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole" -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "postgres_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow RDS PostgreSQL incoming access from anywhere" - from_port = 5432 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 5432 - type = "ingress" -} - -resource "aws_security_group_rule" "mysql_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow RDS MySQL incoming access from anywhere" - from_port = 3306 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 3306 - type = "ingress" -} +} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf b/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf index b5680921..4abe8bc5 100644 --- a/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf +++ b/lib/aws-ec2/bootstrap/s3-qovery-buckets.tf @@ -1,11 +1,7 @@ // S3 bucket to store kubeconfigs resource "aws_s3_bucket" "kubeconfigs_bucket" { bucket = var.s3_bucket_kubeconfig - acl = "private" force_destroy = true - versioning { - enabled = true - } tags = merge( local.tags_ec2, @@ -13,17 +9,30 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" { "Name" = "Kubernetes kubeconfig" } ) +} - server_side_encryption_configuration { - rule { - apply_server_side_encryption_by_default { - kms_master_key_id = aws_kms_key.s3_kubeconfig_kms_encryption.arn - sse_algorithm = "aws:kms" - } +resource "aws_s3_bucket_acl" "kubeconfigs_bucket_acl" { + bucket = aws_s3_bucket.kubeconfigs_bucket.id + acl = "private" +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "kubeconfigs_bucket_encryption" { + bucket = aws_s3_bucket.kubeconfigs_bucket.id + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = 
aws_kms_key.s3_kubeconfig_kms_encryption.arn + sse_algorithm = "aws:kms" } } } +resource "aws_s3_bucket_versioning" "kubeconfigs_bucket_versionning" { + bucket = aws_s3_bucket.kubeconfigs_bucket.id + versioning_configuration { + status = "Enabled" + } +} + resource "aws_kms_key" "s3_kubeconfig_kms_encryption" { description = "s3 kubeconfig encryption" tags = merge( diff --git a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs new file mode 100644 index 00000000..4dd386a8 --- /dev/null +++ b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs @@ -0,0 +1,433 @@ +use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode}; +use crate::cloud_provider::helm::{ + get_chart_for_cluster_agent, get_chart_for_shell_agent, ChartInfo, ChartSetValue, ClusterAgentContext, CommonChart, + CoreDNSConfigChart, HelmChart, HelmChartNamespaces, ShellAgentContext, +}; +use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName}; +use crate::errors::CommandError; +use serde::{Deserialize, Serialize}; +use std::fs::File; +use std::io::BufReader; +use std::path::Path; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AwsEc2QoveryTerraformConfig { + pub loki_storage_config_aws_s3: String, + pub aws_iam_loki_storage_key: String, + pub aws_iam_loki_storage_secret: String, +} + +pub struct Ec2ChartsConfigPrerequisites { + pub organization_id: String, + pub organization_long_id: uuid::Uuid, + pub cluster_id: String, + pub cluster_long_id: uuid::Uuid, + pub region: String, + pub cluster_name: String, + pub cloud_provider: String, + pub test_cluster: bool, + pub aws_access_key_id: String, + pub aws_secret_access_key: String, + pub vpc_qovery_network_mode: VpcQoveryNetworkMode, + pub qovery_engine_location: EngineLocation, + pub ff_log_history_enabled: bool, + pub ff_metrics_history_enabled: bool, + pub managed_dns_name: String, + pub managed_dns_helm_format: String, 
+ pub managed_dns_resolvers_terraform_format: String, + pub external_dns_provider: String, + pub dns_email_report: String, + pub acme_url: String, + pub cloudflare_email: String, + pub cloudflare_api_token: String, + pub disable_pleco: bool, + // qovery options form json input + pub infra_options: Options, +} + +pub fn ec2_aws_helm_charts( + qovery_terraform_config_file: &str, + chart_config_prerequisites: &Ec2ChartsConfigPrerequisites, + chart_prefix_path: Option<&str>, + kubernetes_config: &Path, + envs: &[(String, String)], +) -> Result>>, CommandError> { + let content_file = match File::open(&qovery_terraform_config_file) { + Ok(x) => x, + Err(e) => { + return Err(CommandError::new( + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + Some(envs.to_vec()), + )); + } + }; + let chart_prefix = chart_prefix_path.unwrap_or("./"); + let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; + let reader = BufReader::new(content_file); + let qovery_terraform_config: AwsEc2QoveryTerraformConfig = match serde_json::from_reader(reader) { + Ok(config) => config, + Err(e) => { + return Err(CommandError::new( + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + Some(envs.to_vec()), + )); + } + }; + + // Qovery storage class + let q_storage_class = CommonChart { + chart_info: ChartInfo { + name: "q-storageclass".to_string(), + path: chart_path("/charts/q-storageclass"), + ..Default::default() + }, + }; + + // Calico for AWS + let aws_calico = CommonChart { + chart_info: ChartInfo { + name: "calico".to_string(), + path: chart_path("charts/aws-calico"), + ..Default::default() + }, + }; + + let coredns_config = CoreDNSConfigChart { + chart_info: ChartInfo { + name: "coredns".to_string(), + path: chart_path("/charts/coredns-config"), + values: vec![ + ChartSetValue { + key: 
"managed_dns".to_string(), + value: chart_config_prerequisites.managed_dns_helm_format.clone(), + }, + ChartSetValue { + key: "managed_dns_resolvers".to_string(), + value: chart_config_prerequisites + .managed_dns_resolvers_terraform_format + .clone(), + }, + ], + ..Default::default() + }, + }; + + let external_dns = CommonChart { + chart_info: ChartInfo { + name: "externaldns".to_string(), + path: chart_path("common/charts/external-dns"), + values_files: vec![chart_path("chart_values/external-dns.yaml")], + values: vec![ + // resources limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "50Mi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "50Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let cert_manager = CommonChart { + chart_info: ChartInfo { + name: "cert-manager".to_string(), + path: chart_path("common/charts/cert-manager"), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + // https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + ChartSetValue { + key: "extraArgs".to_string(), + value: "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}" + .to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.enabled".to_string(), + // Due to cycle, prometheus need tls certificate from cert manager, and enabling this will require + // prometheus to be already installed + value: "false".to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.prometheusInstance".to_string(), + value: "qovery".to_string(), + 
}, + // resources limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "1Gi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "1Gi".to_string(), + }, + // Webhooks resources limits + ChartSetValue { + key: "webhook.resources.limits.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "webhook.resources.requests.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "webhook.resources.limits.memory".to_string(), + value: "128Mi".to_string(), + }, + ChartSetValue { + key: "webhook.resources.requests.memory".to_string(), + value: "128Mi".to_string(), + }, + // Cainjector resources limits + ChartSetValue { + key: "cainjector.resources.limits.cpu".to_string(), + value: "500m".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.limits.memory".to_string(), + value: "1Gi".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.requests.memory".to_string(), + value: "1Gi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let mut cert_manager_config = CommonChart { + chart_info: ChartInfo { + name: "cert-manager-configs".to_string(), + path: chart_path("common/charts/cert-manager-configs"), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "externalDnsProvider".to_string(), + value: chart_config_prerequisites.external_dns_provider.clone(), + }, + ChartSetValue { + key: "acme.letsEncrypt.emailReport".to_string(), + value: chart_config_prerequisites.dns_email_report.clone(), + }, + ChartSetValue { + key: "acme.letsEncrypt.acmeUrl".to_string(), + value: chart_config_prerequisites.acme_url.clone(), + 
}, + ChartSetValue { + key: "managedDns".to_string(), + value: chart_config_prerequisites.managed_dns_helm_format.clone(), + }, + ], + ..Default::default() + }, + }; + if chart_config_prerequisites.external_dns_provider == "cloudflare" { + cert_manager_config.chart_info.values.push(ChartSetValue { + key: "provider.cloudflare.apiToken".to_string(), + value: chart_config_prerequisites.cloudflare_api_token.clone(), + }); + cert_manager_config.chart_info.values.push(ChartSetValue { + key: "provider.cloudflare.email".to_string(), + value: chart_config_prerequisites.cloudflare_email.clone(), + }) + } + + let nginx_ingress = CommonChart { + chart_info: ChartInfo { + name: "nginx-ingress".to_string(), + path: chart_path("common/charts/ingress-nginx"), + namespace: HelmChartNamespaces::NginxIngress, + // Because of NLB, svc can take some time to start + timeout_in_seconds: 300, + values_files: vec![chart_path("chart_values/nginx-ingress.yaml")], + values: vec![ + // Controller resources limits + ChartSetValue { + key: "controller.resources.limits.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "controller.resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "controller.resources.limits.memory".to_string(), + value: "768Mi".to_string(), + }, + ChartSetValue { + key: "controller.resources.requests.memory".to_string(), + value: "768Mi".to_string(), + }, + // Default backend resources limits + ChartSetValue { + key: "defaultBackend.resources.limits.cpu".to_string(), + value: "20m".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.requests.cpu".to_string(), + value: "10m".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.limits.memory".to_string(), + value: "32Mi".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.requests.memory".to_string(), + value: "32Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let cluster_agent_context = 
ClusterAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; + + let shell_context = ShellAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?; + + let qovery_agent_version: QoveryAgent = get_qovery_app_version( + QoveryAppName::Agent, + &chart_config_prerequisites.infra_options.agent_version_controller_token, + &chart_config_prerequisites.infra_options.qovery_api_url, + &chart_config_prerequisites.cluster_id, + )?; + + let mut qovery_agent = CommonChart { + chart_info: ChartInfo { + name: "qovery-agent".to_string(), + path: chart_path("common/charts/qovery/qovery-agent"), + namespace: HelmChartNamespaces::Qovery, + values: vec![ + ChartSetValue { + key: "image.tag".to_string(), + value: qovery_agent_version.version, + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "environmentVariables.GRPC_SERVER".to_string(), + 
value: chart_config_prerequisites.infra_options.qovery_grpc_url.to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_TOKEN".to_string(), + value: chart_config_prerequisites + .infra_options + .qovery_cluster_secret_token + .to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_ID".to_string(), + value: chart_config_prerequisites.cluster_long_id.to_string(), + }, + ChartSetValue { + key: "environmentVariables.ORGANIZATION_ID".to_string(), + value: chart_config_prerequisites.organization_long_id.to_string(), + }, + ChartSetValue { + key: "environmentVariables.LOKI_URL".to_string(), + value: format!("http://{}.cluster.local:3100", "not-installed"), + }, + // resources limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "500Mi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "500Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + if chart_config_prerequisites.ff_log_history_enabled { + qovery_agent.chart_info.values.push(ChartSetValue { + key: "environmentVariables.FEATURES".to_string(), + value: "LogsHistory".to_string(), + }) + } + + // chart deployment order matters!!! 
+ let level_1: Vec> = vec![Box::new(q_storage_class), Box::new(coredns_config)]; + + let level_2: Vec> = vec![Box::new(cert_manager)]; + + let level_3: Vec> = vec![]; + + let level_4: Vec> = vec![Box::new(aws_calico)]; + + let level_5: Vec> = vec![Box::new(external_dns)]; + + let level_6: Vec> = vec![Box::new(nginx_ingress)]; + + let level_7: Vec> = vec![ + Box::new(cert_manager_config), + Box::new(qovery_agent), // TODO: Migrate to the new cluster agent + Box::new(cluster_agent), + Box::new(shell_agent), + ]; + + info!("charts configuration preparation finished"); + Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6, level_7]) +} diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs similarity index 99% rename from src/cloud_provider/aws/kubernetes/helm_charts.rs rename to src/cloud_provider/aws/kubernetes/eks_helm_charts.rs index e4595d2e..78469eb7 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs @@ -16,7 +16,7 @@ use std::thread::sleep; use std::time::Duration; #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AwsQoveryTerraformConfig { +pub struct AwsEksQoveryTerraformConfig { pub aws_iam_eks_user_mapper_key: String, pub aws_iam_eks_user_mapper_secret: String, pub aws_iam_cluster_autoscaler_key: String, @@ -28,7 +28,7 @@ pub struct AwsQoveryTerraformConfig { pub aws_iam_loki_storage_secret: String, } -pub struct ChartsConfigPrerequisites { +pub struct EksChartsConfigPrerequisites { pub organization_id: String, pub organization_long_id: uuid::Uuid, pub cluster_id: String, @@ -56,9 +56,9 @@ pub struct ChartsConfigPrerequisites { pub infra_options: Options, } -pub fn aws_helm_charts( +pub fn eks_aws_helm_charts( qovery_terraform_config_file: &str, - chart_config_prerequisites: &ChartsConfigPrerequisites, + chart_config_prerequisites: &EksChartsConfigPrerequisites, chart_prefix_path: Option<&str>, 
kubernetes_config: &Path, envs: &[(String, String)], @@ -76,7 +76,7 @@ pub fn aws_helm_charts( let chart_prefix = chart_prefix_path.unwrap_or("./"); let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; let reader = BufReader::new(content_file); - let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) { + let qovery_terraform_config: AwsEksQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { return Err(CommandError::new( diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 4ef90fbe..baffe7d7 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -8,12 +8,13 @@ use retry::OperationResult; use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; -use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites}; +use crate::cloud_provider::aws::kubernetes::ec2_helm_charts::{ec2_aws_helm_charts, Ec2ChartsConfigPrerequisites}; +use crate::cloud_provider::aws::kubernetes::eks_helm_charts::{eks_aws_helm_charts, EksChartsConfigPrerequisites}; use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create; use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo}; use crate::cloud_provider::kubernetes::{ - is_kubernetes_upgrade_required, uninstall_cert_manager, Kubernetes, ProviderOptions, + is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, ProviderOptions, }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; @@ -32,8 +33,9 @@ use crate::object_storage::s3::S3; use crate::string::terraform_list_format; pub mod ec2; +mod ec2_helm_charts; pub mod eks; -pub mod helm_charts; +pub mod eks_helm_charts; pub mod node; pub mod roles; @@ -606,36 +608,6 
@@ fn create( &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => kubernetes.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )); - } - } - }; - } - } - } - Err(e) => kubernetes.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), kubernetes.context().is_dry_run_deploy()) { return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); @@ -653,46 +625,97 @@ fn create( .map(|x| (x.0.to_string(), x.1.to_string())) .collect(); - let charts_prerequisites = ChartsConfigPrerequisites { - organization_id: kubernetes.cloud_provider().organization_id().to_string(), - organization_long_id: kubernetes.cloud_provider().organization_long_id(), - infra_options: options.clone(), - cluster_id: kubernetes.id().to_string(), - cluster_long_id: kubernetes_long_id, - region: kubernetes.region(), - cluster_name: kubernetes.cluster_name(), - cloud_provider: "aws".to_string(), - test_cluster: kubernetes.context().is_test_cluster(), - aws_access_key_id: kubernetes.cloud_provider().access_key_id(), - aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), - vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), - qovery_engine_location: options.qovery_engine_location.clone(), - 
ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), - ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), - managed_dns_name: kubernetes.dns_provider().domain().to_string(), - managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), - managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(kubernetes.dns_provider()), - external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), - dns_email_report: options.tls_email_report.clone(), - acme_url: lets_encrypt_url(kubernetes.context()), - cloudflare_email: kubernetes.dns_provider().account().to_string(), - cloudflare_api_token: kubernetes.dns_provider().token().to_string(), - disable_pleco: kubernetes.context().disable_pleco(), - }; - kubernetes.logger().log(EngineEvent::Info( event_details.clone(), EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), )); - let helm_charts_to_deploy = aws_helm_charts( - format!("{}/qovery-tf-config.json", &temp_dir).as_str(), - &charts_prerequisites, - Some(&temp_dir), - kubeconfig_path, - &credentials_environment_variables, - ) - .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; + let helm_charts_to_deploy = match kubernetes.kind() { + Kind::Eks => { + let charts_prerequisites = EksChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: 
kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format( + kubernetes.dns_provider(), + ), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + eks_aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))? 
+ } + Kind::Ec2 => { + let charts_prerequisites = Ec2ChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format( + kubernetes.dns_provider(), + ), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + ec2_aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))? 
+ } + _ => { + let safe_message = format!("unsupported requested cluster type: {}", kubernetes.kind()); + return Err(EngineError::new_unsupported_cluster_kind( + event_details, + &safe_message, + CommandError::new(safe_message.to_string(), None, None), + )); + } + }; deploy_charts_levels( kubeconfig_path, diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 4afe105d..c75a6eef 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -31,7 +31,7 @@ use crate::cmd::helm::{to_engine_error, Helm}; use crate::cmd::kubectl::{ do_kubectl_exec_get_loadbalancer_id, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, }; -use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; +use crate::cmd::terraform::terraform_init_validate_plan_apply; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; @@ -540,36 +540,6 @@ impl DOKS { &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, 
- )), - }; - // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { let error = diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index b8d9e693..8bf77758 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -664,36 +664,6 @@ impl Kapsule { &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - // TODO(benjaminch): move this elsewhere // Create object-storage buckets self.logger().log(EngineEvent::Info( diff --git a/src/errors/io.rs b/src/errors/io.rs index fa895a2c..217bdcd4 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -73,6 +73,7 @@ pub enum Tag { HelmHistoryError, CannotGetAnyAvailableVPC, UnsupportedVersion, + UnsupportedClusterKind, CannotGetSupportedVersions, CannotGetCluster, ContainerRegistryError, @@ -224,6 +225,7 @@ impl From for Tag { } errors::Tag::BuilderError => Tag::BuilderError, errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError, + errors::Tag::UnsupportedClusterKind => Tag::UnsupportedClusterKind, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index dd47f0a8..05316c65 100644 --- 
a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -175,6 +175,8 @@ pub enum Tag { CannotGetWorkspaceDirectory, /// UnsupportedInstanceType: represents an unsupported instance type for the given cloud provider. UnsupportedInstanceType, + /// UnsupportedClusterKind: represents an unsupported cluster kind by Qovery. + UnsupportedClusterKind, /// UnsupportedRegion: represents an unsupported region for the given cloud provider. UnsupportedRegion, /// UnsupportedZone: represents an unsupported zone in region for the given cloud provider. @@ -623,6 +625,32 @@ impl EngineError { ) } + /// Creates new error for unsupported cluster kind. + /// + /// Qovery doesn't support this kind of clusters. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `requested_kind`: Raw requested instance type string. + /// * `error_message`: Raw error message. + pub fn new_unsupported_cluster_kind( + event_details: EventDetails, + new_unsupported_cluster_kind: &str, + error_message: CommandError, + ) -> EngineError { + let message = format!("`{}` cluster kind is not supported", new_unsupported_cluster_kind); + EngineError::new( + event_details, + Tag::UnsupportedClusterKind, + message.to_string(), + message, + Some(error_message), + None, // TODO(documentation): Create a page entry to details this error + Some("Selected cluster kind is not supported, please check Qovery's documentation.".to_string()), + ) + } + /// Creates new error for unsupported region. /// /// Cloud provider doesn't support the requested region. 
diff --git a/tests/aws/aws_kubernetes_ec2.rs b/tests/aws/aws_kubernetes_ec2.rs index 8f60bb8f..6985e106 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -12,8 +12,6 @@ use std::str::FromStr; use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; -pub const _AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; - #[cfg(feature = "test-aws-infra-ec2")] fn create_and_destroy_aws_ec2_k3s_cluster( region: String, From 5320858e6715d379a0a7e6d1c50f3308429db3a3 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Mon, 2 May 2022 00:27:49 +0200 Subject: [PATCH 099/122] fix: fix several issues --- lib/aws-ec2/bootstrap/ec2-sec-group.tf | 50 +++++++++---------- lib/aws-ec2/bootstrap/{ec2.tf => ec2.j2.tf} | 18 ++++--- lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf | 11 +--- lib/aws-ec2/bootstrap/tf-default-vars.j2.tf | 2 +- .../aws/kubernetes/ec2_helm_charts.rs | 5 +- 5 files changed, 41 insertions(+), 45 deletions(-) rename lib/aws-ec2/bootstrap/{ec2.tf => ec2.j2.tf} (74%) diff --git a/lib/aws-ec2/bootstrap/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf index 5475a38f..a37b260b 100644 --- a/lib/aws-ec2/bootstrap/ec2-sec-group.tf +++ b/lib/aws-ec2/bootstrap/ec2-sec-group.tf @@ -1,3 +1,9 @@ +# randomize inbound kubernetes port number for more security +resource "random_integer" "kubernetes_external_port" { + min = 1024 + max = 65534 +} + resource "aws_security_group" "ec2_instance" { name = "qovery-ec2-${var.kubernetes_cluster_id}" description = "Cluster communication with worker nodes" @@ -10,31 +16,23 @@ resource "aws_security_group" "ec2_instance" { cidr_blocks = ["0.0.0.0/0"] } + // nginx ingress + ingress { + description = "HTTPS connectivity" + from_port = 443 + protocol = "tcp" + to_port = 443 + cidr_blocks = ["0.0.0.0/0"] + } + + // kubernetes + ingress { + description = "Kubernetes connectivity" + from_port = 
random_integer.kubernetes_external_port.result + protocol = "tcp" + to_port = random_integer.kubernetes_external_port.result + cidr_blocks = ["0.0.0.0/0"] + } + tags = local.tags_ec2 -} - -resource "aws_security_group_rule" "https" { - cidr_blocks = ["0.0.0.0/0"] - description = "HTTPS connectivity" - from_port = 443 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 443 - type = "ingress" -} - -# randomize inbound kubernetes port number for more security -resource "random_integer" "kubernetes_external_port" { - min = 1024 - max = 65534 -} - -resource "aws_security_group_rule" "kubernetes" { - cidr_blocks = ["0.0.0.0/0"] - description = "Kubernetes connectivity" - from_port = random_integer.kubernetes_external_port.result - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = random_integer.kubernetes_external_port.result - type = "ingress" } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf similarity index 74% rename from lib/aws-ec2/bootstrap/ec2.tf rename to lib/aws-ec2/bootstrap/ec2.j2.tf index dbeb3cc5..62c5a17b 100644 --- a/lib/aws-ec2/bootstrap/ec2.tf +++ b/lib/aws-ec2/bootstrap/ec2.j2.tf @@ -33,10 +33,14 @@ resource "aws_instance" "ec2_instance" { # security vpc_security_group_ids = [aws_security_group.ec2_instance.id] subnet_id = aws_subnet.ec2_zone_a[0].id - security_groups = [aws_security_group.ec2_instance.id] user_data = local.bootstrap - user_data_replace_on_change = false + user_data_replace_on_change = true + +# lifecycle { +# // user data changes, forces to restart the EC2 instance +# ignore_changes = [user_data] +# } tags = merge( local.tags_common, @@ -57,7 +61,7 @@ locals { #!/bin/bash export KUBECONFIG_FILENAME="${var.kubernetes_cluster_id}.yaml" -export KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME" +export NEW_KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME" apt-get update apt-get -y install curl s3cmd @@ -74,8 +78,10 @@ while 
[ ! -f /etc/rancher/k3s/k3s.yaml ] ; do done # Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap -sed -r "s/127.0.0.1:6443/$(curl -s http://169.254.169.254/latest/meta-data/public-hostname):${random_integer.kubernetes_external_port.result}/g" /etc/rancher/k3s/k3s.yaml > $KUBECONFIG_PATH -s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME -rm -f $KUBECONFIG_PATH +public_hostname="$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)" +sed "s/127.0.0.1/$public_hostname/g" /etc/rancher/k3s/k3s.yaml > $NEW_KUBECONFIG_PATH +sed -i "s/:6443/:${random_integer.kubernetes_external_port.result}/g" $NEW_KUBECONFIG_PATH +s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $NEW_KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME +rm -f $NEW_KUBECONFIG_PATH BOOTSTRAP } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf index 0044f5f0..9bec1658 100644 --- a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf +++ b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf @@ -1,15 +1,8 @@ locals { qovery_tf_config = < Date: Tue, 3 May 2022 15:01:54 +0200 Subject: [PATCH 100/122] tests: fix sccache release retrieval (#705) --- .github/workflows/tests.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7eb52ed9..ba465df9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -21,20 +21,23 @@ jobs: - uses: hashicorp/setup-terraform@v1 with: terraform_version: 0.14.10 + - uses: sergeysova/jq-action@v2 + id: sccache_release + with: + cmd: 'curl --silent "https://api.github.com/repos/Qovery/sccache-bin/releases/latest" | jq .tag_name' + multiline: false - 
name: build-linter-utests run: | echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) rustup component add clippy cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) - export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache export SCCACHE_REDIS=${{ secrets.SCCACHE_REDIS }} export TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache mkdir -p $GITHUB_WORKSPACE/bin $HOME/.terraform.d/plugin-cache - sccache_release=$(curl --silent "https://github.com/Qovery/sccache-bin/releases/latest" | sed -r 's/^.+tag\/(.+)">.+/\1/') - curl -sLo $GITHUB_WORKSPACE/bin/sccache https://github.com/Qovery/sccache-bin/releases/download/${sccache_release}/sccache + curl -sLo $GITHUB_WORKSPACE/bin/sccache https://github.com/Qovery/sccache-bin/releases/download/${{ steps.sccache_release.outputs.value }}/sccache chmod 755 $GITHUB_WORKSPACE/bin/sccache echo "########## SHARED CACHE STATUS ##########" sccache --version From 9d832b07455fbd0faf7768f8aa412a9b5e894740 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Tue, 3 May 2022 15:25:02 +0200 Subject: [PATCH 101/122] feat: adding EC2 port open check to avoid random issues --- .../aws/kubernetes/ec2_helm_charts.rs | 59 +++++++---- src/cloud_provider/aws/kubernetes/mod.rs | 39 +++++++- src/cloud_provider/utilities.rs | 99 ++++++++++++++++++- src/errors/io.rs | 2 + src/errors/mod.rs | 22 +++++ 5 files changed, 196 insertions(+), 25 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs index 61e14668..bb5be4dd 100644 --- a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs @@ -16,6 +16,18 @@ pub struct AwsEc2QoveryTerraformConfig { pub aws_ec2_kubernetes_port: String, } +impl AwsEc2QoveryTerraformConfig { + pub fn 
kubernetes_port_to_u16(&self) -> Result { + match self.aws_ec2_kubernetes_port.parse::() { + Ok(x) => Ok(x), + Err(e) => Err(format!( + "error while trying to convert kubernetes port from string {} to int: {}", + self.aws_ec2_kubernetes_port, e + )), + } + } +} + pub struct Ec2ChartsConfigPrerequisites { pub organization_id: String, pub organization_long_id: uuid::Uuid, @@ -44,6 +56,31 @@ pub struct Ec2ChartsConfigPrerequisites { pub infra_options: Options, } +pub fn get_aws_ec2_qovery_terraform_config( + qovery_terraform_config_file: &str, +) -> Result { + let content_file = match File::open(&qovery_terraform_config_file) { + Ok(x) => x, + Err(e) => { + return Err(CommandError::new( + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + None, + )); + } + }; + + let reader = BufReader::new(content_file); + match serde_json::from_reader(reader) { + Ok(config) => Ok(config), + Err(e) => Err(CommandError::new( + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + None, + )), + } +} + pub fn ec2_aws_helm_charts( qovery_terraform_config_file: &str, chart_config_prerequisites: &Ec2ChartsConfigPrerequisites, @@ -51,29 +88,9 @@ pub fn ec2_aws_helm_charts( kubernetes_config: &Path, envs: &[(String, String)], ) -> Result>>, CommandError> { - let content_file = match File::open(&qovery_terraform_config_file) { - Ok(x) => x, - Err(e) => { - return Err(CommandError::new( - "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. 
Are you running it in dry run mode?".to_string(), - Some(e.to_string()), - Some(envs.to_vec()), - )); - } - }; let chart_prefix = chart_prefix_path.unwrap_or("./"); let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; - let reader = BufReader::new(content_file); - let qovery_terraform_config: AwsEc2QoveryTerraformConfig = match serde_json::from_reader(reader) { - Ok(config) => config, - Err(e) => { - return Err(CommandError::new( - format!("Error while parsing terraform config file {}", qovery_terraform_config_file), - Some(e.to_string()), - Some(envs.to_vec()), - )); - } - }; + let qovery_terraform_config = get_aws_ec2_qovery_terraform_config(qovery_terraform_config_file)?; // Qovery storage class let q_storage_class = CommonChart { diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index baffe7d7..0051c55e 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -8,7 +8,9 @@ use retry::OperationResult; use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; -use crate::cloud_provider::aws::kubernetes::ec2_helm_charts::{ec2_aws_helm_charts, Ec2ChartsConfigPrerequisites}; +use crate::cloud_provider::aws::kubernetes::ec2_helm_charts::{ + ec2_aws_helm_charts, get_aws_ec2_qovery_terraform_config, Ec2ChartsConfigPrerequisites, +}; use crate::cloud_provider::aws::kubernetes::eks_helm_charts::{eks_aws_helm_charts, EksChartsConfigPrerequisites}; use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create; use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; @@ -18,6 +20,7 @@ use crate::cloud_provider::kubernetes::{ }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; +use crate::cloud_provider::utilities::{wait_until_port_is_open, TcpCheckSource}; use crate::cloud_provider::CloudProvider; use crate::cmd; use crate::cmd::helm::{to_engine_error, 
Helm}; @@ -613,8 +616,40 @@ fn create( return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } + // wait for AWS EC2 K3S port is open to avoid later deployment issues (and kubeconfig not available on S3) + match kubernetes.kind() { + Kind::Ec2 => { + let qovery_teraform_config = + get_aws_ec2_qovery_terraform_config(format!("{}/qovery-tf-config.json", &temp_dir).as_str()) + .map_err(|e| EngineError::new_terraform_qovery_config_mismatch(event_details.clone(), e))?; + + let port = qovery_teraform_config.kubernetes_port_to_u16().map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( + event_details.clone(), + CommandError::new_from_safe_message(e), + ) + })?; + + wait_until_port_is_open( + &TcpCheckSource::DnsName(qovery_teraform_config.aws_ec2_public_hostname.as_str()), + port, + 300, + kubernetes.logger(), + event_details.clone(), + ) + .map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( + event_details.clone(), + CommandError::new(format!( + "Wasn't able to connect to Kubernetes API, can't continue. Did you manually performed changes AWS side?" 
+ ), Some(format!("{:?}", e)), None), + ) + })?; + } + _ => {} + } + // kubernetes helm deployments on the cluster - // todo: instead of downloading kubeconfig file, use the one that has just been generated by terraform let kubeconfig_path = kubernetes.get_kubeconfig_file_path()?; let kubeconfig_path = Path::new(&kubeconfig_path); diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 5a0949c0..9a7b49f7 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -9,7 +9,10 @@ use core::option::Option::{None, Some}; use core::result::Result; use core::result::Result::{Err, Ok}; use retry::delay::Fixed; -use retry::OperationResult; +use retry::{Error, OperationResult}; +use std::fmt; +use std::net::ToSocketAddrs; +use std::net::{SocketAddr, TcpStream as NetTcpStream}; use trust_dns_resolver::config::*; use trust_dns_resolver::proto::rr::{RData, RecordType}; use trust_dns_resolver::Resolver; @@ -244,6 +247,78 @@ pub fn managed_db_name_sanitizer(max_size: usize, prefix: &str, name: &str) -> S new_name } +#[derive(PartialEq, Debug)] +pub enum TcpCheckErrors { + DomainNotResolvable, + PortNotOpen, + UnknownError, +} + +pub enum TcpCheckSource<'a> { + SocketAddr(SocketAddr), + DnsName(&'a str), +} + +impl fmt::Display for TcpCheckSource<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TcpCheckSource::SocketAddr(x) => write!(f, "{}", x), + TcpCheckSource::DnsName(x) => write!(f, "{}", x), + } + } +} + +pub fn check_tcp_port_is_open(address: &TcpCheckSource, port: u16) -> Result<(), TcpCheckErrors> { + let timeout = core::time::Duration::from_secs(1); + + let ip = match address { + TcpCheckSource::SocketAddr(x) => x.clone(), + TcpCheckSource::DnsName(x) => { + let address = format!("{}:{}", x, port); + match address.to_socket_addrs() { + Ok(x) => { + let ips: Vec = x.collect(); + ips[0] + } + Err(_) => return Err(TcpCheckErrors::DomainNotResolvable), + } + } + }; + + match 
NetTcpStream::connect_timeout(&ip, timeout) { + Ok(_) => Ok(()), + Err(_) => Err(TcpCheckErrors::PortNotOpen), + } +} + +pub fn wait_until_port_is_open( + address: &TcpCheckSource, + port: u16, + max_timeout: usize, + logger: &dyn Logger, + event_details: EventDetails, +) -> Result<(), TcpCheckErrors> { + let fixed_iterable = Fixed::from(core::time::Duration::from_secs(1)).take(max_timeout); + let check_result = retry::retry(fixed_iterable, || match check_tcp_port_is_open(address, port) { + Ok(_) => OperationResult::Ok(()), + Err(e) => { + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("{}:{} is still not ready: {:?}. retrying...", address, port, e)), + )); + OperationResult::Retry(e) + } + }); + + match check_result { + Ok(_) => Ok(()), + Err(e) => match e { + Error::Operation { error, .. } => Err(error), + Error::Internal(_) => Err(TcpCheckErrors::UnknownError), + }, + } +} + pub fn print_action( cloud_provider_name: &str, struct_name: &str, @@ -261,11 +336,31 @@ pub fn print_action( #[cfg(test)] mod tests { - use crate::cloud_provider::utilities::{dns_resolvers, get_cname_record_value}; + use crate::cloud_provider::utilities::{ + check_tcp_port_is_open, dns_resolvers, get_cname_record_value, TcpCheckErrors, TcpCheckSource, + }; use crate::errors::CommandError; use crate::models::types::VersionsNumber; use std::str::FromStr; + #[test] + pub fn test_port_open() { + let address_ok = "www.qovery.com"; + let port_ok: u16 = 443; + let address_nok = "www.abcdefghijklmnopqrstuvwxyz.com"; + let port_nok: u16 = 4430; + + assert!(check_tcp_port_is_open(&TcpCheckSource::DnsName(address_ok), port_ok).is_ok()); + assert_eq!( + check_tcp_port_is_open(&TcpCheckSource::DnsName(address_nok), port_ok).unwrap_err(), + TcpCheckErrors::DomainNotResolvable + ); + assert_eq!( + check_tcp_port_is_open(&TcpCheckSource::DnsName(address_ok), port_nok).unwrap_err(), + TcpCheckErrors::PortNotOpen + ); + } + #[test] pub fn test_cname_resolution() 
{ let resolvers = dns_resolvers(); diff --git a/src/errors/io.rs b/src/errors/io.rs index 217bdcd4..9a5a83db 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -84,6 +84,7 @@ pub enum Tag { CloudProviderApiMissingInfo, K8sValidateRequiredCPUandBurstableError, TerraformContextUnsupportedParameterValue, + TerraformQoveryConfigMismatch, ClientServiceFailedToStart, ClientServiceFailedToDeployBeforeStart, DatabaseFailedToStartAfterSeveralRetries, @@ -226,6 +227,7 @@ impl From for Tag { errors::Tag::BuilderError => Tag::BuilderError, errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError, errors::Tag::UnsupportedClusterKind => Tag::UnsupportedClusterKind, + errors::Tag::TerraformQoveryConfigMismatch => Tag::TerraformQoveryConfigMismatch, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 05316c65..8925f0d1 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -249,6 +249,8 @@ pub enum Tag { CannotCopyFilesFromDirectoryToDirectory, /// CannotPauseClusterTasksAreRunning: represents an error where we cannot pause the cluster because some tasks are still running in the engine. CannotPauseClusterTasksAreRunning, + /// TerraformQoveryConfigMismatch: terraform qovery config retrieve mismatch + TerraformQoveryConfigMismatch, /// TerraformCannotRemoveEntryOut: represents an error where we cannot remove an entry out of Terraform. TerraformCannotRemoveEntryOut, /// TerraformNoStateFileExists: represents an error where there is no Terraform state file. @@ -1603,6 +1605,26 @@ impl EngineError { ) } + /// Creates new error for terraform qovery config mismatch + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `raw_error`: Raw error message. 
+ pub fn new_terraform_qovery_config_mismatch(event_details: EventDetails, raw_error: CommandError) -> EngineError { + let message = "Error while trying to use Qovery Terraform generated config."; + + EngineError::new( + event_details, + Tag::TerraformQoveryConfigMismatch, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + /// Creates new error for removing an element out of terraform. /// /// Arguments: From 32714cae00376c166ec3b75c0bbf5ff389b1884e Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Fri, 29 Apr 2022 17:48:56 +0200 Subject: [PATCH 102/122] feat: aws kubeconfig retrieval improvment This CL tries to introduce a better handling of S3 eventual consitency. https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel Issue was at cluster creation, we try to get a kubeconfig which doesn't exists yet in order to know whether or not the cluster exists and needs to be upgraded. It leads to S3 caching the fact this bucket doesn't exists and reject later s# get while the bucket actually exists. By introducing a retry mechanism, we allow s3 to properly propagate new file / bucket creation and reach consistency. 
lint: remove redundant clone() build: fix moved value chore: upgrade ec2 terraform libs feat: add kubeconfig to s3 + firewall rules + clean + EC2 helm charts fix: fix several issues feat: adding EC2 port open check to avoid random issues --- .../charts/coredns-config/.helmignore | 23 + .../charts/coredns-config/Chart.yaml | 6 + .../coredns-config/templates/_helpers.tpl | 62 +++ .../coredns-config/templates/configmap.yml | 31 ++ .../charts/coredns-config/values.yaml | 4 + .../charts/q-storageclass/.helmignore | 23 + .../charts/q-storageclass/Chart.yaml | 23 + .../q-storageclass/templates/_helpers.tpl | 63 +++ .../templates/storageclass.yaml | 64 +++ .../charts/q-storageclass/values.yaml | 0 lib/aws-ec2/bootstrap/documentdb.tf | 14 +- lib/aws-ec2/bootstrap/ec2-sec-group.tf | 34 +- lib/aws-ec2/bootstrap/ec2.j2.tf | 25 +- lib/aws-ec2/bootstrap/elasticcache.tf | 14 +- lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf | 11 +- lib/aws-ec2/bootstrap/rds.tf | 24 +- lib/aws-ec2/bootstrap/s3-qovery-buckets.tf | 29 +- lib/aws-ec2/bootstrap/tf-default-vars.j2.tf | 2 +- lib/aws-ec2/bootstrap/tf-providers-aws.j2.tf | 16 +- .../aws/kubernetes/ec2_helm_charts.rs | 449 ++++++++++++++++++ .../{helm_charts.rs => eks_helm_charts.rs} | 10 +- src/cloud_provider/aws/kubernetes/mod.rs | 196 +++++--- .../digitalocean/kubernetes/mod.rs | 32 +- src/cloud_provider/kubernetes.rs | 19 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 30 -- src/cloud_provider/utilities.rs | 99 +++- src/errors/io.rs | 4 + src/errors/mod.rs | 50 ++ tests/aws/aws_kubernetes_ec2.rs | 2 - 29 files changed, 1125 insertions(+), 234 deletions(-) create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml create mode 100644 
lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml create mode 100644 lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml create mode 100644 src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs rename src/cloud_provider/aws/kubernetes/{helm_charts.rs => eks_helm_charts.rs} (99%) diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore b/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml b/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml new file mode 100644 index 00000000..6773a3f6 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: coredns-config +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: 0.1 diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl new file mode 100644 index 00000000..0c858639 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "coredns-config.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "coredns-config.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "coredns-config.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "coredns-config.labels" -}} +helm.sh/chart: {{ include "coredns-config.chart" . }} +{{ include "coredns-config.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "coredns-config.selectorLabels" -}} +app.kubernetes.io/name: {{ include "coredns-config.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns-config.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "coredns-config.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml new file mode 100644 index 00000000..0cd29a18 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/templates/configmap.yml @@ -0,0 +1,31 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: coredns + namespace: kube-system + labels: + eks.amazonaws.com/component: coredns + k8s-app: kube-dns +data: + Corefile: | + .:53 { + errors + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } + {{- range .Values.managed_dns }} + {{ . }}:53 { + errors + cache 30 + forward . {{ join " " $.Values.managed_dns_resolvers }} + } + {{ end }} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml b/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml new file mode 100644 index 00000000..843a6389 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/coredns-config/values.yaml @@ -0,0 +1,4 @@ +# List of managed DNS +managed_dns: [] +# List of resolvers +managed_dns_resolvers: [] diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore b/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml new file mode 100644 index 00000000..8227ff32 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: q-storageclass +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 0.1 diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl new file mode 100644 index 00000000..0edf0421 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "q-ebs-csi-config.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "q-ebs-csi-config.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "q-ebs-csi-config.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "q-ebs-csi-config.labels" -}} +helm.sh/chart: {{ include "q-ebs-csi-config.chart" . }} +{{ include "q-ebs-csi-config.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "q-ebs-csi-config.selectorLabels" -}} +app.kubernetes.io/name: {{ include "q-ebs-csi-config.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "q-ebs-csi-config.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "q-ebs-csi-config.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml new file mode 100644 index 00000000..abdd4923 --- /dev/null +++ b/lib/aws-ec2/bootstrap/charts/q-storageclass/templates/storageclass.yaml @@ -0,0 +1,64 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-gp2-0 + labels: + aws-type: "gp2" + qovery-type: "ssd" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: gp2 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-io1-0 + labels: + aws-type: "io1" + qovery-type: "nvme" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: io1 + iopsPerGB: "32" + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-st1-0 + labels: + aws-type: "st1" + qovery-type: "hdd" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: st1 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: aws-ebs-sc1-0 + labels: + aws-type: "sc1" + qovery-type: "cold" + reclaim: "0" +provisioner: kubernetes.io/aws-ebs +parameters: + type: sc1 + encrypted: 'true' +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +reclaimPolicy: Delete \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml b/lib/aws-ec2/bootstrap/charts/q-storageclass/values.yaml new file mode 100644 index 00000000..e69de29b diff --git 
a/lib/aws-ec2/bootstrap/documentdb.tf b/lib/aws-ec2/bootstrap/documentdb.tf index 7828faf5..292fb78a 100644 --- a/lib/aws-ec2/bootstrap/documentdb.tf +++ b/lib/aws-ec2/bootstrap/documentdb.tf @@ -66,16 +66,4 @@ resource "aws_docdb_subnet_group" "documentdb" { subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id]) tags = local.tags_documentdb -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "documentdb_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow DocumentDB incoming access from anywhere" - from_port = 27017 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 27017 - type = "ingress" -} +} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2-sec-group.tf b/lib/aws-ec2/bootstrap/ec2-sec-group.tf index f8c94814..a37b260b 100644 --- a/lib/aws-ec2/bootstrap/ec2-sec-group.tf +++ b/lib/aws-ec2/bootstrap/ec2-sec-group.tf @@ -1,3 +1,9 @@ +# randomize inbound kubernetes port number for more security +resource "random_integer" "kubernetes_external_port" { + min = 1024 + max = 65534 +} + resource "aws_security_group" "ec2_instance" { name = "qovery-ec2-${var.kubernetes_cluster_id}" description = "Cluster communication with worker nodes" @@ -10,15 +16,23 @@ resource "aws_security_group" "ec2_instance" { cidr_blocks = ["0.0.0.0/0"] } - tags = local.tags_ec2 -} + // nginx ingress + ingress { + description = "HTTPS connectivity" + from_port = 443 + protocol = "tcp" + to_port = 443 + cidr_blocks = ["0.0.0.0/0"] + } -resource "aws_security_group_rule" "https" { - cidr_blocks = ["0.0.0.0/0"] - description = "HTTPS connectivity" - from_port = 443 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 443 - type = "ingress" + // kubernetes + ingress { + description = "Kubernetes connectivity" + from_port = random_integer.kubernetes_external_port.result + protocol = "tcp" + 
to_port = random_integer.kubernetes_external_port.result + cidr_blocks = ["0.0.0.0/0"] + } + + tags = local.tags_ec2 } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/ec2.j2.tf b/lib/aws-ec2/bootstrap/ec2.j2.tf index 756276ef..62c5a17b 100644 --- a/lib/aws-ec2/bootstrap/ec2.j2.tf +++ b/lib/aws-ec2/bootstrap/ec2.j2.tf @@ -33,9 +33,14 @@ resource "aws_instance" "ec2_instance" { # security vpc_security_group_ids = [aws_security_group.ec2_instance.id] subnet_id = aws_subnet.ec2_zone_a[0].id - security_groups = [aws_security_group.ec2_instance.id] user_data = local.bootstrap + user_data_replace_on_change = true + +# lifecycle { +# // user data changes, forces to restart the EC2 instance +# ignore_changes = [user_data] +# } tags = merge( local.tags_common, @@ -43,6 +48,10 @@ resource "aws_instance" "ec2_instance" { "Service" = "EC2" } ) + + depends_on = [ + aws_s3_bucket.kubeconfigs_bucket + ] } resource "time_static" "on_ec2_create" {} @@ -50,12 +59,16 @@ resource "time_static" "on_ec2_create" {} locals { bootstrap = <> /etc/profile @@ -63,6 +76,12 @@ while [ ! 
-f /etc/rancher/k3s/k3s.yaml ] ; do echo "kubeconfig is not yet present, sleeping" sleep 1 done -s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put /etc/rancher/k3s/k3s.yaml s3://${var.s3_bucket_kubeconfig}/${var.kubernetes_cluster_id}.yaml + +# Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap +public_hostname="$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)" +sed "s/127.0.0.1/$public_hostname/g" /etc/rancher/k3s/k3s.yaml > $NEW_KUBECONFIG_PATH +sed -i "s/:6443/:${random_integer.kubernetes_external_port.result}/g" $NEW_KUBECONFIG_PATH +s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $NEW_KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME +rm -f $NEW_KUBECONFIG_PATH BOOTSTRAP } \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/elasticcache.tf b/lib/aws-ec2/bootstrap/elasticcache.tf index 2e703dac..b4a37b16 100644 --- a/lib/aws-ec2/bootstrap/elasticcache.tf +++ b/lib/aws-ec2/bootstrap/elasticcache.tf @@ -65,16 +65,4 @@ resource "aws_elasticache_subnet_group" "elasticache" { # WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly name = "elasticache-${aws_vpc.ec2.id}" subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id]) -} - -# Todo: create a bastion to avoid this - -resource "aws_security_group_rule" "elasticache_remote_access" { - cidr_blocks = ["0.0.0.0/0"] - description = "Allow Redis incoming access from anywhere" - from_port = 6379 - protocol = "tcp" - security_group_id = aws_security_group.ec2_instance.id - to_port = 6379 - type = "ingress" -} +} \ No newline at end of file diff --git a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf index 0044f5f0..9bec1658 100644 --- 
a/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf +++ b/lib/aws-ec2/bootstrap/qovery-tf-config.j2.tf @@ -1,15 +1,8 @@ locals { qovery_tf_config = < Result { + match self.aws_ec2_kubernetes_port.parse::() { + Ok(x) => Ok(x), + Err(e) => Err(format!( + "error while trying to convert kubernetes port from string {} to int: {}", + self.aws_ec2_kubernetes_port, e + )), + } + } +} + +pub struct Ec2ChartsConfigPrerequisites { + pub organization_id: String, + pub organization_long_id: uuid::Uuid, + pub cluster_id: String, + pub cluster_long_id: uuid::Uuid, + pub region: String, + pub cluster_name: String, + pub cloud_provider: String, + pub test_cluster: bool, + pub aws_access_key_id: String, + pub aws_secret_access_key: String, + pub vpc_qovery_network_mode: VpcQoveryNetworkMode, + pub qovery_engine_location: EngineLocation, + pub ff_log_history_enabled: bool, + pub ff_metrics_history_enabled: bool, + pub managed_dns_name: String, + pub managed_dns_helm_format: String, + pub managed_dns_resolvers_terraform_format: String, + pub external_dns_provider: String, + pub dns_email_report: String, + pub acme_url: String, + pub cloudflare_email: String, + pub cloudflare_api_token: String, + pub disable_pleco: bool, + // qovery options form json input + pub infra_options: Options, +} + +pub fn get_aws_ec2_qovery_terraform_config( + qovery_terraform_config_file: &str, +) -> Result { + let content_file = match File::open(&qovery_terraform_config_file) { + Ok(x) => x, + Err(e) => { + return Err(CommandError::new( + "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. 
Are you running it in dry run mode?".to_string(), + Some(e.to_string()), + None, + )); + } + }; + + let reader = BufReader::new(content_file); + match serde_json::from_reader(reader) { + Ok(config) => Ok(config), + Err(e) => Err(CommandError::new( + format!("Error while parsing terraform config file {}", qovery_terraform_config_file), + Some(e.to_string()), + None, + )), + } +} + +pub fn ec2_aws_helm_charts( + qovery_terraform_config_file: &str, + chart_config_prerequisites: &Ec2ChartsConfigPrerequisites, + chart_prefix_path: Option<&str>, + kubernetes_config: &Path, + envs: &[(String, String)], +) -> Result>>, CommandError> { + let chart_prefix = chart_prefix_path.unwrap_or("./"); + let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; + let qovery_terraform_config = get_aws_ec2_qovery_terraform_config(qovery_terraform_config_file)?; + + // Qovery storage class + let q_storage_class = CommonChart { + chart_info: ChartInfo { + name: "q-storageclass".to_string(), + path: chart_path("/charts/q-storageclass"), + ..Default::default() + }, + }; + + // Calico for AWS + let aws_calico = CommonChart { + chart_info: ChartInfo { + name: "calico".to_string(), + path: chart_path("charts/aws-calico"), + ..Default::default() + }, + }; + + let coredns_config = CoreDNSConfigChart { + chart_info: ChartInfo { + name: "coredns".to_string(), + path: chart_path("/charts/coredns-config"), + values: vec![ + ChartSetValue { + key: "managed_dns".to_string(), + value: chart_config_prerequisites.managed_dns_helm_format.clone(), + }, + ChartSetValue { + key: "managed_dns_resolvers".to_string(), + value: chart_config_prerequisites + .managed_dns_resolvers_terraform_format + .clone(), + }, + ], + ..Default::default() + }, + }; + + let external_dns = CommonChart { + chart_info: ChartInfo { + name: "externaldns".to_string(), + path: chart_path("common/charts/external-dns"), + values_files: vec![chart_path("chart_values/external-dns.yaml")], + values: vec![ + // resources 
limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "50Mi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "50Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let cert_manager = CommonChart { + chart_info: ChartInfo { + name: "cert-manager".to_string(), + path: chart_path("common/charts/cert-manager"), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + // https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + ChartSetValue { + key: "extraArgs".to_string(), + value: "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}" + .to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.enabled".to_string(), + // Due to cycle, prometheus need tls certificate from cert manager, and enabling this will require + // prometheus to be already installed + value: "false".to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.prometheusInstance".to_string(), + value: "qovery".to_string(), + }, + // resources limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "1Gi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "1Gi".to_string(), + }, + // Webhooks resources limits + ChartSetValue { + key: "webhook.resources.limits.cpu".to_string(), + value: "200m".to_string(), 
+ }, + ChartSetValue { + key: "webhook.resources.requests.cpu".to_string(), + value: "50m".to_string(), + }, + ChartSetValue { + key: "webhook.resources.limits.memory".to_string(), + value: "128Mi".to_string(), + }, + ChartSetValue { + key: "webhook.resources.requests.memory".to_string(), + value: "128Mi".to_string(), + }, + // Cainjector resources limits + ChartSetValue { + key: "cainjector.resources.limits.cpu".to_string(), + value: "500m".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.limits.memory".to_string(), + value: "1Gi".to_string(), + }, + ChartSetValue { + key: "cainjector.resources.requests.memory".to_string(), + value: "1Gi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let mut cert_manager_config = CommonChart { + chart_info: ChartInfo { + name: "cert-manager-configs".to_string(), + path: chart_path("common/charts/cert-manager-configs"), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "externalDnsProvider".to_string(), + value: chart_config_prerequisites.external_dns_provider.clone(), + }, + ChartSetValue { + key: "acme.letsEncrypt.emailReport".to_string(), + value: chart_config_prerequisites.dns_email_report.clone(), + }, + ChartSetValue { + key: "acme.letsEncrypt.acmeUrl".to_string(), + value: chart_config_prerequisites.acme_url.clone(), + }, + ChartSetValue { + key: "managedDns".to_string(), + value: chart_config_prerequisites.managed_dns_helm_format.clone(), + }, + ], + ..Default::default() + }, + }; + if chart_config_prerequisites.external_dns_provider == "cloudflare" { + cert_manager_config.chart_info.values.push(ChartSetValue { + key: "provider.cloudflare.apiToken".to_string(), + value: chart_config_prerequisites.cloudflare_api_token.clone(), + }); + cert_manager_config.chart_info.values.push(ChartSetValue { + key: "provider.cloudflare.email".to_string(), + value: 
chart_config_prerequisites.cloudflare_email.clone(), + }) + } + + let nginx_ingress = CommonChart { + chart_info: ChartInfo { + name: "nginx-ingress".to_string(), + path: chart_path("common/charts/ingress-nginx"), + namespace: HelmChartNamespaces::NginxIngress, + // Because of NLB, svc can take some time to start + timeout_in_seconds: 300, + values_files: vec![chart_path("chart_values/nginx-ingress.yaml")], + values: vec![ + // Controller resources limits + ChartSetValue { + key: "controller.resources.limits.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "controller.resources.requests.cpu".to_string(), + value: "100m".to_string(), + }, + ChartSetValue { + key: "controller.resources.limits.memory".to_string(), + value: "768Mi".to_string(), + }, + ChartSetValue { + key: "controller.resources.requests.memory".to_string(), + value: "768Mi".to_string(), + }, + // Default backend resources limits + ChartSetValue { + key: "defaultBackend.resources.limits.cpu".to_string(), + value: "20m".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.requests.cpu".to_string(), + value: "10m".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.limits.memory".to_string(), + value: "32Mi".to_string(), + }, + ChartSetValue { + key: "defaultBackend.resources.requests.memory".to_string(), + value: "32Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + let cluster_agent_context = ClusterAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let 
cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; + + let shell_context = ShellAgentContext { + api_url: &chart_config_prerequisites.infra_options.qovery_api_url, + api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token, + organization_long_id: &chart_config_prerequisites.organization_long_id, + cluster_id: &chart_config_prerequisites.cluster_id, + cluster_long_id: &chart_config_prerequisites.cluster_long_id, + cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, + }; + let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?; + + let qovery_agent_version: QoveryAgent = get_qovery_app_version( + QoveryAppName::Agent, + &chart_config_prerequisites.infra_options.agent_version_controller_token, + &chart_config_prerequisites.infra_options.qovery_api_url, + &chart_config_prerequisites.cluster_id, + )?; + + let mut qovery_agent = CommonChart { + chart_info: ChartInfo { + name: "qovery-agent".to_string(), + path: chart_path("common/charts/qovery/qovery-agent"), + namespace: HelmChartNamespaces::Qovery, + values: vec![ + ChartSetValue { + key: "image.tag".to_string(), + value: qovery_agent_version.version, + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "environmentVariables.GRPC_SERVER".to_string(), + value: chart_config_prerequisites.infra_options.qovery_grpc_url.to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_TOKEN".to_string(), + value: chart_config_prerequisites + .infra_options + .qovery_cluster_secret_token + .to_string(), + }, + ChartSetValue { + key: "environmentVariables.CLUSTER_ID".to_string(), + value: chart_config_prerequisites.cluster_long_id.to_string(), + }, + ChartSetValue { + key: "environmentVariables.ORGANIZATION_ID".to_string(), + value: 
chart_config_prerequisites.organization_long_id.to_string(), + }, + ChartSetValue { + key: "environmentVariables.LOKI_URL".to_string(), + value: format!("http://{}.cluster.local:3100", "not-installed"), + }, + // resources limits + ChartSetValue { + key: "resources.limits.cpu".to_string(), + value: "1".to_string(), + }, + ChartSetValue { + key: "resources.requests.cpu".to_string(), + value: "200m".to_string(), + }, + ChartSetValue { + key: "resources.limits.memory".to_string(), + value: "500Mi".to_string(), + }, + ChartSetValue { + key: "resources.requests.memory".to_string(), + value: "500Mi".to_string(), + }, + ], + ..Default::default() + }, + }; + + if chart_config_prerequisites.ff_log_history_enabled { + qovery_agent.chart_info.values.push(ChartSetValue { + key: "environmentVariables.FEATURES".to_string(), + value: "LogsHistory".to_string(), + }) + } + + // chart deployment order matters!!! + let level_1: Vec> = vec![Box::new(q_storage_class), Box::new(coredns_config)]; + + let level_2: Vec> = vec![Box::new(cert_manager)]; + + let level_3: Vec> = vec![]; + + let level_4: Vec> = vec![Box::new(aws_calico)]; + + let level_5: Vec> = vec![Box::new(external_dns)]; + + let level_6: Vec> = vec![Box::new(nginx_ingress)]; + + let level_7: Vec> = vec![ + Box::new(cert_manager_config), + Box::new(qovery_agent), // TODO: Migrate to the new cluster agent + Box::new(cluster_agent), + Box::new(shell_agent), + ]; + + info!("charts configuration preparation finished"); + Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6, level_7]) +} diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs similarity index 99% rename from src/cloud_provider/aws/kubernetes/helm_charts.rs rename to src/cloud_provider/aws/kubernetes/eks_helm_charts.rs index e4595d2e..78469eb7 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs @@ -16,7 +16,7 @@ use 
std::thread::sleep; use std::time::Duration; #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AwsQoveryTerraformConfig { +pub struct AwsEksQoveryTerraformConfig { pub aws_iam_eks_user_mapper_key: String, pub aws_iam_eks_user_mapper_secret: String, pub aws_iam_cluster_autoscaler_key: String, @@ -28,7 +28,7 @@ pub struct AwsQoveryTerraformConfig { pub aws_iam_loki_storage_secret: String, } -pub struct ChartsConfigPrerequisites { +pub struct EksChartsConfigPrerequisites { pub organization_id: String, pub organization_long_id: uuid::Uuid, pub cluster_id: String, @@ -56,9 +56,9 @@ pub struct ChartsConfigPrerequisites { pub infra_options: Options, } -pub fn aws_helm_charts( +pub fn eks_aws_helm_charts( qovery_terraform_config_file: &str, - chart_config_prerequisites: &ChartsConfigPrerequisites, + chart_config_prerequisites: &EksChartsConfigPrerequisites, chart_prefix_path: Option<&str>, kubernetes_config: &Path, envs: &[(String, String)], @@ -76,7 +76,7 @@ pub fn aws_helm_charts( let chart_prefix = chart_prefix_path.unwrap_or("./"); let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; let reader = BufReader::new(content_file); - let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) { + let qovery_terraform_config: AwsEksQoveryTerraformConfig = match serde_json::from_reader(reader) { Ok(config) => config, Err(e) => { return Err(CommandError::new( diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 4ef90fbe..0051c55e 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -8,15 +8,19 @@ use retry::OperationResult; use serde::{Deserialize, Serialize}; use tera::Context as TeraContext; -use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites}; +use crate::cloud_provider::aws::kubernetes::ec2_helm_charts::{ + ec2_aws_helm_charts, 
get_aws_ec2_qovery_terraform_config, Ec2ChartsConfigPrerequisites, +}; +use crate::cloud_provider::aws::kubernetes::eks_helm_charts::{eks_aws_helm_charts, EksChartsConfigPrerequisites}; use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create; use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones}; use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo}; use crate::cloud_provider::kubernetes::{ - is_kubernetes_upgrade_required, uninstall_cert_manager, Kubernetes, ProviderOptions, + is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, ProviderOptions, }; use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat}; use crate::cloud_provider::qovery::EngineLocation; +use crate::cloud_provider::utilities::{wait_until_port_is_open, TcpCheckSource}; use crate::cloud_provider::CloudProvider; use crate::cmd; use crate::cmd::helm::{to_engine_error, Helm}; @@ -32,8 +36,9 @@ use crate::object_storage::s3::S3; use crate::string::terraform_list_format; pub mod ec2; +mod ec2_helm_charts; pub mod eks; -pub mod helm_charts; +pub mod eks_helm_charts; pub mod node; pub mod roles; @@ -606,43 +611,45 @@ fn create( &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => kubernetes.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )); - } - } - }; - } - } - } - Err(e) => kubernetes.logger().log(EngineEvent::Error( - 
EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - // terraform deployment dedicated to cloud resources if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), kubernetes.context().is_dry_run_deploy()) { return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e)); } + // wait for AWS EC2 K3S port is open to avoid later deployment issues (and kubeconfig not available on S3) + match kubernetes.kind() { + Kind::Ec2 => { + let qovery_teraform_config = + get_aws_ec2_qovery_terraform_config(format!("{}/qovery-tf-config.json", &temp_dir).as_str()) + .map_err(|e| EngineError::new_terraform_qovery_config_mismatch(event_details.clone(), e))?; + + let port = qovery_teraform_config.kubernetes_port_to_u16().map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( + event_details.clone(), + CommandError::new_from_safe_message(e), + ) + })?; + + wait_until_port_is_open( + &TcpCheckSource::DnsName(qovery_teraform_config.aws_ec2_public_hostname.as_str()), + port, + 300, + kubernetes.logger(), + event_details.clone(), + ) + .map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( + event_details.clone(), + CommandError::new(format!( + "Wasn't able to connect to Kubernetes API, can't continue. Did you manually performed changes AWS side?" 
+ ), Some(format!("{:?}", e)), None), + ) + })?; + } + _ => {} + } + // kubernetes helm deployments on the cluster - // todo: instead of downloading kubeconfig file, use the one that has just been generated by terraform let kubeconfig_path = kubernetes.get_kubeconfig_file_path()?; let kubeconfig_path = Path::new(&kubeconfig_path); @@ -653,46 +660,97 @@ fn create( .map(|x| (x.0.to_string(), x.1.to_string())) .collect(); - let charts_prerequisites = ChartsConfigPrerequisites { - organization_id: kubernetes.cloud_provider().organization_id().to_string(), - organization_long_id: kubernetes.cloud_provider().organization_long_id(), - infra_options: options.clone(), - cluster_id: kubernetes.id().to_string(), - cluster_long_id: kubernetes_long_id, - region: kubernetes.region(), - cluster_name: kubernetes.cluster_name(), - cloud_provider: "aws".to_string(), - test_cluster: kubernetes.context().is_test_cluster(), - aws_access_key_id: kubernetes.cloud_provider().access_key_id(), - aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), - vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), - qovery_engine_location: options.qovery_engine_location.clone(), - ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), - ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), - managed_dns_name: kubernetes.dns_provider().domain().to_string(), - managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), - managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(kubernetes.dns_provider()), - external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), - dns_email_report: options.tls_email_report.clone(), - acme_url: lets_encrypt_url(kubernetes.context()), - cloudflare_email: kubernetes.dns_provider().account().to_string(), - cloudflare_api_token: kubernetes.dns_provider().token().to_string(), - disable_pleco: 
kubernetes.context().disable_pleco(), - }; - kubernetes.logger().log(EngineEvent::Info( event_details.clone(), EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()), )); - let helm_charts_to_deploy = aws_helm_charts( - format!("{}/qovery-tf-config.json", &temp_dir).as_str(), - &charts_prerequisites, - Some(&temp_dir), - kubeconfig_path, - &credentials_environment_variables, - ) - .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?; + let helm_charts_to_deploy = match kubernetes.kind() { + Kind::Eks => { + let charts_prerequisites = EksChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format( + kubernetes.dns_provider(), + ), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + 
cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + eks_aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))? + } + Kind::Ec2 => { + let charts_prerequisites = Ec2ChartsConfigPrerequisites { + organization_id: kubernetes.cloud_provider().organization_id().to_string(), + organization_long_id: kubernetes.cloud_provider().organization_long_id(), + infra_options: options.clone(), + cluster_id: kubernetes.id().to_string(), + cluster_long_id: kubernetes_long_id, + region: kubernetes.region(), + cluster_name: kubernetes.cluster_name(), + cloud_provider: "aws".to_string(), + test_cluster: kubernetes.context().is_test_cluster(), + aws_access_key_id: kubernetes.cloud_provider().access_key_id(), + aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(), + vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(), + qovery_engine_location: options.qovery_engine_location.clone(), + ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory), + ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory), + managed_dns_name: kubernetes.dns_provider().domain().to_string(), + managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(), + managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format( + kubernetes.dns_provider(), + ), + external_dns_provider: kubernetes.dns_provider().provider_name().to_string(), + dns_email_report: options.tls_email_report.clone(), + acme_url: lets_encrypt_url(kubernetes.context()), + cloudflare_email: kubernetes.dns_provider().account().to_string(), + cloudflare_api_token: 
kubernetes.dns_provider().token().to_string(), + disable_pleco: kubernetes.context().disable_pleco(), + }; + ec2_aws_helm_charts( + format!("{}/qovery-tf-config.json", &temp_dir).as_str(), + &charts_prerequisites, + Some(&temp_dir), + kubeconfig_path, + &credentials_environment_variables, + ) + .map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))? + } + _ => { + let safe_message = format!("unsupported requested cluster type: {}", kubernetes.kind()); + return Err(EngineError::new_unsupported_cluster_kind( + event_details, + &safe_message, + CommandError::new(safe_message.to_string(), None, None), + )); + } + }; deploy_charts_levels( kubeconfig_path, diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 4afe105d..c75a6eef 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -31,7 +31,7 @@ use crate::cmd::helm::{to_engine_error, Helm}; use crate::cmd::kubectl::{ do_kubectl_exec_get_loadbalancer_id, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, }; -use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; +use crate::cmd::terraform::terraform_init_validate_plan_apply; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; @@ -540,36 +540,6 @@ impl DOKS { &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - 
event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - // Logs bucket if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) { let error = diff --git a/src/cloud_provider/kubernetes.rs b/src/cloud_provider/kubernetes.rs index 4ff9cf5c..acc96ce1 100644 --- a/src/cloud_provider/kubernetes.rs +++ b/src/cloud_provider/kubernetes.rs @@ -123,22 +123,31 @@ pub trait Kubernetes: Listen { (StringPath::from(&local_kubeconfig_generated), kubeconfig_file) } - None => { + None => match retry::retry(Fibonacci::from_millis(5000).take(5), || { match self .config_file_store() .get(bucket_name.as_str(), object_key.as_str(), true) { - Ok((path, file)) => (path, file), + Ok((path, file)) => retry::OperationResult::Ok((path, file)), Err(err) => { let error = EngineError::new_cannot_retrieve_cluster_config_file( - self.get_event_details(stage), + self.get_event_details(stage.clone()), err.into(), ); self.logger().log(EngineEvent::Error(error.clone(), None)); - return Err(error); + retry::OperationResult::Retry(error) } } - } + }) { + Ok((path, file)) => (path, file), + Err(Operation { error, .. 
}) => return Err(error), + Err(retry::Error::Internal(msg)) => { + return Err(EngineError::new_cannot_retrieve_cluster_config_file( + self.get_event_details(stage), + CommandError::new("Error while trying to get kubeconfig file.".to_string(), Some(msg), None), + )) + } + }, }; let metadata = match file.metadata() { diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index b8d9e693..8bf77758 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -664,36 +664,6 @@ impl Kapsule { &listeners_helper, ); - // temporary: remove helm/kube management from terraform - match terraform_init_validate_state_list(temp_dir.as_str()) { - Ok(x) => { - let items_type = vec!["helm_release", "kubernetes_namespace"]; - for item in items_type { - for entry in x.clone() { - if entry.starts_with(item) { - match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) { - Ok(_) => self.logger().log(EngineEvent::Info( - event_details.clone(), - EventMessage::new_from_safe(format!("successfully removed {}", &entry)), - )), - Err(e) => { - return Err(EngineError::new_terraform_cannot_remove_entry_out( - event_details, - entry.to_string(), - e, - )) - } - } - }; - } - } - } - Err(e) => self.logger().log(EngineEvent::Error( - EngineError::new_terraform_state_does_not_exist(event_details.clone(), e), - None, - )), - }; - // TODO(benjaminch): move this elsewhere // Create object-storage buckets self.logger().log(EngineEvent::Info( diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 5a0949c0..9a7b49f7 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -9,7 +9,10 @@ use core::option::Option::{None, Some}; use core::result::Result; use core::result::Result::{Err, Ok}; use retry::delay::Fixed; -use retry::OperationResult; +use retry::{Error, OperationResult}; +use std::fmt; +use std::net::ToSocketAddrs; +use 
std::net::{SocketAddr, TcpStream as NetTcpStream}; use trust_dns_resolver::config::*; use trust_dns_resolver::proto::rr::{RData, RecordType}; use trust_dns_resolver::Resolver; @@ -244,6 +247,78 @@ pub fn managed_db_name_sanitizer(max_size: usize, prefix: &str, name: &str) -> S new_name } +#[derive(PartialEq, Debug)] +pub enum TcpCheckErrors { + DomainNotResolvable, + PortNotOpen, + UnknownError, +} + +pub enum TcpCheckSource<'a> { + SocketAddr(SocketAddr), + DnsName(&'a str), +} + +impl fmt::Display for TcpCheckSource<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TcpCheckSource::SocketAddr(x) => write!(f, "{}", x), + TcpCheckSource::DnsName(x) => write!(f, "{}", x), + } + } +} + +pub fn check_tcp_port_is_open(address: &TcpCheckSource, port: u16) -> Result<(), TcpCheckErrors> { + let timeout = core::time::Duration::from_secs(1); + + let ip = match address { + TcpCheckSource::SocketAddr(x) => x.clone(), + TcpCheckSource::DnsName(x) => { + let address = format!("{}:{}", x, port); + match address.to_socket_addrs() { + Ok(x) => { + let ips: Vec = x.collect(); + ips[0] + } + Err(_) => return Err(TcpCheckErrors::DomainNotResolvable), + } + } + }; + + match NetTcpStream::connect_timeout(&ip, timeout) { + Ok(_) => Ok(()), + Err(_) => Err(TcpCheckErrors::PortNotOpen), + } +} + +pub fn wait_until_port_is_open( + address: &TcpCheckSource, + port: u16, + max_timeout: usize, + logger: &dyn Logger, + event_details: EventDetails, +) -> Result<(), TcpCheckErrors> { + let fixed_iterable = Fixed::from(core::time::Duration::from_secs(1)).take(max_timeout); + let check_result = retry::retry(fixed_iterable, || match check_tcp_port_is_open(address, port) { + Ok(_) => OperationResult::Ok(()), + Err(e) => { + logger.log(EngineEvent::Info( + event_details.clone(), + EventMessage::new_from_safe(format!("{}:{} is still not ready: {:?}. 
retrying...", address, port, e)), + )); + OperationResult::Retry(e) + } + }); + + match check_result { + Ok(_) => Ok(()), + Err(e) => match e { + Error::Operation { error, .. } => Err(error), + Error::Internal(_) => Err(TcpCheckErrors::UnknownError), + }, + } +} + pub fn print_action( cloud_provider_name: &str, struct_name: &str, @@ -261,11 +336,31 @@ pub fn print_action( #[cfg(test)] mod tests { - use crate::cloud_provider::utilities::{dns_resolvers, get_cname_record_value}; + use crate::cloud_provider::utilities::{ + check_tcp_port_is_open, dns_resolvers, get_cname_record_value, TcpCheckErrors, TcpCheckSource, + }; use crate::errors::CommandError; use crate::models::types::VersionsNumber; use std::str::FromStr; + #[test] + pub fn test_port_open() { + let address_ok = "www.qovery.com"; + let port_ok: u16 = 443; + let address_nok = "www.abcdefghijklmnopqrstuvwxyz.com"; + let port_nok: u16 = 4430; + + assert!(check_tcp_port_is_open(&TcpCheckSource::DnsName(address_ok), port_ok).is_ok()); + assert_eq!( + check_tcp_port_is_open(&TcpCheckSource::DnsName(address_nok), port_ok).unwrap_err(), + TcpCheckErrors::DomainNotResolvable + ); + assert_eq!( + check_tcp_port_is_open(&TcpCheckSource::DnsName(address_ok), port_nok).unwrap_err(), + TcpCheckErrors::PortNotOpen + ); + } + #[test] pub fn test_cname_resolution() { let resolvers = dns_resolvers(); diff --git a/src/errors/io.rs b/src/errors/io.rs index fa895a2c..9a5a83db 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -73,6 +73,7 @@ pub enum Tag { HelmHistoryError, CannotGetAnyAvailableVPC, UnsupportedVersion, + UnsupportedClusterKind, CannotGetSupportedVersions, CannotGetCluster, ContainerRegistryError, @@ -83,6 +84,7 @@ pub enum Tag { CloudProviderApiMissingInfo, K8sValidateRequiredCPUandBurstableError, TerraformContextUnsupportedParameterValue, + TerraformQoveryConfigMismatch, ClientServiceFailedToStart, ClientServiceFailedToDeployBeforeStart, DatabaseFailedToStartAfterSeveralRetries, @@ -224,6 +226,8 @@ impl 
From for Tag { } errors::Tag::BuilderError => Tag::BuilderError, errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError, + errors::Tag::UnsupportedClusterKind => Tag::UnsupportedClusterKind, + errors::Tag::TerraformQoveryConfigMismatch => Tag::TerraformQoveryConfigMismatch, } } } diff --git a/src/errors/mod.rs b/src/errors/mod.rs index dd47f0a8..8925f0d1 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -175,6 +175,8 @@ pub enum Tag { CannotGetWorkspaceDirectory, /// UnsupportedInstanceType: represents an unsupported instance type for the given cloud provider. UnsupportedInstanceType, + /// UnsupportedClusterKind: represents an unsupported cluster kind by Qovery. + UnsupportedClusterKind, /// UnsupportedRegion: represents an unsupported region for the given cloud provider. UnsupportedRegion, /// UnsupportedZone: represents an unsupported zone in region for the given cloud provider. @@ -247,6 +249,8 @@ pub enum Tag { CannotCopyFilesFromDirectoryToDirectory, /// CannotPauseClusterTasksAreRunning: represents an error where we cannot pause the cluster because some tasks are still running in the engine. CannotPauseClusterTasksAreRunning, + /// TerraformQoveryConfigMismatch: terraform qovery config retrieve mismatch + TerraformQoveryConfigMismatch, /// TerraformCannotRemoveEntryOut: represents an error where we cannot remove an entry out of Terraform. TerraformCannotRemoveEntryOut, /// TerraformNoStateFileExists: represents an error where there is no Terraform state file. @@ -623,6 +627,32 @@ impl EngineError { ) } + /// Creates new error for unsupported cluster kind. + /// + /// Qovery doesn't support this kind of clusters. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `requested_kind`: Raw requested instance type string. + /// * `error_message`: Raw error message. 
+ pub fn new_unsupported_cluster_kind( + event_details: EventDetails, + new_unsupported_cluster_kind: &str, + error_message: CommandError, + ) -> EngineError { + let message = format!("`{}` cluster kind is not supported", new_unsupported_cluster_kind); + EngineError::new( + event_details, + Tag::UnsupportedClusterKind, + message.to_string(), + message, + Some(error_message), + None, // TODO(documentation): Create a page entry to details this error + Some("Selected cluster kind is not supported, please check Qovery's documentation.".to_string()), + ) + } + /// Creates new error for unsupported region. /// /// Cloud provider doesn't support the requested region. @@ -1575,6 +1605,26 @@ impl EngineError { ) } + /// Creates new error for terraform qovery config mismatch + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `raw_error`: Raw error message. + pub fn new_terraform_qovery_config_mismatch(event_details: EventDetails, raw_error: CommandError) -> EngineError { + let message = "Error while trying to use Qovery Terraform generated config."; + + EngineError::new( + event_details, + Tag::TerraformQoveryConfigMismatch, + message.to_string(), + message.to_string(), + Some(raw_error), + None, + None, + ) + } + /// Creates new error for removing an element out of terraform. 
/// /// Arguments: diff --git a/tests/aws/aws_kubernetes_ec2.rs b/tests/aws/aws_kubernetes_ec2.rs index 8f60bb8f..6985e106 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -12,8 +12,6 @@ use std::str::FromStr; use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; -pub const _AWS_K3S_VERSION: &str = "v1.20.15+k3s1"; - #[cfg(feature = "test-aws-infra-ec2")] fn create_and_destroy_aws_ec2_k3s_cluster( region: String, From b96d800ad599cedc9312cda982949cf2b55d596e Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 3 May 2022 17:46:41 +0200 Subject: [PATCH 103/122] fix: add wildcarded cluster domain to dns provider (#698) Ticket: ENG-1152 --- ...inx-ingress.yaml => nginx-ingress.j2.yaml} | 4 +++ .../chart_values/nginx-ingress.j2.yaml | 1 + ...inx-ingress.yaml => nginx-ingress.j2.yaml} | 1 + src/cloud_provider/aws/kubernetes/mod.rs | 1 + .../digitalocean/kubernetes/mod.rs | 1 + src/cloud_provider/scaleway/kubernetes/mod.rs | 1 + test_utilities/src/aws.rs | 4 ++- test_utilities/src/cloudflare.rs | 6 +++- test_utilities/src/common.rs | 30 ++++++++++++++----- test_utilities/src/digitalocean.rs | 4 ++- test_utilities/src/scaleway.rs | 4 ++- tests/aws/aws_kubernetes.rs | 8 ++--- tests/scaleway/scw_kubernetes.rs | 5 ++-- 13 files changed, 51 insertions(+), 19 deletions(-) rename lib/aws/bootstrap/chart_values/{nginx-ingress.yaml => nginx-ingress.j2.yaml} (84%) rename lib/scaleway/bootstrap/chart_values/{nginx-ingress.yaml => nginx-ingress.j2.yaml} (92%) diff --git a/lib/aws/bootstrap/chart_values/nginx-ingress.yaml b/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml similarity index 84% rename from lib/aws/bootstrap/chart_values/nginx-ingress.yaml rename to lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml index 3482a49e..370f4ac8 100644 --- a/lib/aws/bootstrap/chart_values/nginx-ingress.yaml +++ 
b/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml @@ -17,10 +17,14 @@ controller: targetCPUUtilizationPercentage: 50 targetMemoryUtilizationPercentage: 50 + publishService: + enabled: true + service: enabled: true annotations: service.beta.kubernetes.io/aws-load-balancer-type: nlb + external-dns.alpha.kubernetes.io/hostname: "{{ wildcard_managed_dns }}" externalTrafficPolicy: "Local" sessionAffinity: "" healthCheckNodePort: 0 \ No newline at end of file diff --git a/lib/digitalocean/bootstrap/chart_values/nginx-ingress.j2.yaml b/lib/digitalocean/bootstrap/chart_values/nginx-ingress.j2.yaml index eea65639..9ea3d1f1 100644 --- a/lib/digitalocean/bootstrap/chart_values/nginx-ingress.j2.yaml +++ b/lib/digitalocean/bootstrap/chart_values/nginx-ingress.j2.yaml @@ -23,4 +23,5 @@ controller: service.beta.kubernetes.io/do-loadbalancer-size-slug: "lb-small" service.beta.kubernetes.io/do-loadbalancer-enable-proxy-protocol: "true" service.beta.kubernetes.io/do-loadbalancer-hostname: {{ do_loadbalancer_hostname }} + external-dns.alpha.kubernetes.io/hostname: "{{ wildcard_managed_dns }}" externalTrafficPolicy: "Local" diff --git a/lib/scaleway/bootstrap/chart_values/nginx-ingress.yaml b/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml similarity index 92% rename from lib/scaleway/bootstrap/chart_values/nginx-ingress.yaml rename to lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml index 8f964673..46c14265 100644 --- a/lib/scaleway/bootstrap/chart_values/nginx-ingress.yaml +++ b/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml @@ -22,4 +22,5 @@ controller: service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "false" service.beta.kubernetes.io/scw-loadbalancer-health-check-type: tcp service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "false" + external-dns.alpha.kubernetes.io/hostname: "{{ wildcard_managed_dns }}" externalTrafficPolicy: "Local" \ No newline at end of file diff --git a/src/cloud_provider/aws/kubernetes/mod.rs 
b/src/cloud_provider/aws/kubernetes/mod.rs index 79faf7f3..6039df06 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -414,6 +414,7 @@ impl EKS { "managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format, ); + context.insert("wildcard_managed_dns", &self.dns_provider().domain().wildcarded().to_string()); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index d9795030..2f15f74c 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -257,6 +257,7 @@ impl DOKS { "managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format, ); + context.insert("wildcard_managed_dns", &self.dns_provider().domain().wildcarded().to_string()); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 0fec4092..78fd8bde 100644 --- a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -456,6 +456,7 @@ impl Kapsule { "managed_dns_resolvers_terraform_format", &managed_dns_resolvers_terraform_format, ); + context.insert("wildcard_managed_dns", &self.dns_provider().domain().wildcarded().to_string()); match self.dns_provider.kind() { dns_provider::Kind::Cloudflare => { context.insert("external_dns_provider", self.dns_provider.provider_name()); diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 36fb944c..c65eab5d 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -61,7 +61,9 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> logger, AWS_TEST_REGION.to_string().as_str(), 
AWS_KUBERNETES_VERSION.to_string(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ) } diff --git a/test_utilities/src/cloudflare.rs b/test_utilities/src/cloudflare.rs index 6cee1e77..d00e9de2 100644 --- a/test_utilities/src/cloudflare.rs +++ b/test_utilities/src/cloudflare.rs @@ -8,7 +8,11 @@ pub fn dns_provider_cloudflare(context: &Context, domain: &ClusterDomain) -> Box let secrets = FuncTestsSecrets::new(); let domain = Domain::new(match domain { ClusterDomain::Custom(domain) => domain.to_string(), - ClusterDomain::Default => secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set"), + ClusterDomain::Default { cluster_id } => format!( + "{}.{}", + cluster_id, + secrets.CLOUDFLARE_DOMAIN.expect("CLOUDFLARE_DOMAIN is not set") + ), }); Box::new(Cloudflare::new( context.clone(), diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index a4c2275f..f0de9792 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -54,7 +54,7 @@ pub enum RegionActivationStatus { } pub enum ClusterDomain { - Default, + Default { cluster_id: String }, Custom(String), } @@ -1149,7 +1149,9 @@ pub fn test_db( logger.clone(), localisation.as_str(), kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), Kind::Do => DO::docker_cr_engine( @@ -1157,7 +1159,9 @@ pub fn test_db( logger.clone(), localisation.as_str(), kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), Kind::Scw => Scaleway::docker_cr_engine( @@ -1165,7 +1169,9 @@ pub fn test_db( logger.clone(), localisation.as_str(), kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), }; @@ -1200,7 +1206,7 @@ pub fn test_db( }; } DatabaseMode::MANAGED => { - 
match get_svc(context, provider_kind.clone(), environment.clone(), secrets.clone()) { + match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(svc) => { let service = svc .items @@ -1223,13 +1229,17 @@ pub fn test_db( } } + let cluster_id = context.cluster_id().to_string(); + let engine_config_for_delete = match provider_kind { Kind::Aws => AWS::docker_cr_engine( &context_for_delete, logger.clone(), localisation.as_str(), kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: cluster_id.to_string(), + }, None, ), Kind::Do => DO::docker_cr_engine( @@ -1237,7 +1247,9 @@ pub fn test_db( logger.clone(), localisation.as_str(), kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: cluster_id.to_string(), + }, None, ), Kind::Scw => Scaleway::docker_cr_engine( @@ -1245,7 +1257,9 @@ pub fn test_db( logger.clone(), localisation.as_str(), kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: cluster_id.to_string(), + }, None, ), }; diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 36a5db93..c052a882 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -49,7 +49,9 @@ pub fn do_default_engine_config(context: &Context, logger: Box) -> E logger, DO_TEST_REGION.to_string().as_str(), DO_KUBERNETES_VERSION.to_string(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ) } diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index d3c570bf..e55e1ca5 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -70,7 +70,9 @@ pub fn scw_default_engine_config(context: &Context, logger: Box) -> logger, SCW_TEST_ZONE.to_string().as_str(), SCW_KUBERNETES_VERSION.to_string(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: 
context.cluster_id().to_string(), + }, None, ) } diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index 53f790b8..47ddb0e0 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -22,20 +22,18 @@ fn create_and_destroy_eks_cluster( engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); let zones = region.get_zones(); + let cluster_id = generate_cluster_id(region.to_string().as_str()); cluster_test( test_name, Kind::Aws, - context( - generate_id().as_str(), - generate_cluster_id(region.to_string().as_str()).as_str(), - ), + context(generate_id().as_str(), cluster_id.as_str()), logger(), region.to_aws_format().as_str(), Some(zones), test_type, major_boot_version, minor_boot_version, - &ClusterDomain::Default, + &ClusterDomain::Default { cluster_id }, Option::from(vpc_network_mode), None, ) diff --git a/tests/scaleway/scw_kubernetes.rs b/tests/scaleway/scw_kubernetes.rs index 952cc24d..4f6f515b 100644 --- a/tests/scaleway/scw_kubernetes.rs +++ b/tests/scaleway/scw_kubernetes.rs @@ -18,17 +18,18 @@ fn create_and_destroy_kapsule_cluster( vpc_network_mode: Option, ) { engine_run_test(|| { + let cluster_id = generate_cluster_id(zone.as_str()); cluster_test( test_name, Kind::Scw, - context(generate_id().as_str(), generate_cluster_id(zone.as_str()).as_str()), + context(generate_id().as_str(), cluster_id.as_str()), logger(), zone.as_str(), None, test_type, major_boot_version, minor_boot_version, - &ClusterDomain::Default, + &ClusterDomain::Default { cluster_id }, vpc_network_mode, None, ) From 838f83a83139ee5f28b12af07a227c73989e1a11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Romaric=20Philog=C3=A8ne?= Date: Tue, 3 May 2022 19:38:48 +0200 Subject: [PATCH 104/122] fix: merge from dev --- src/cloud_provider/aws/kubernetes/mod.rs | 5 ++++- src/cloud_provider/digitalocean/kubernetes/mod.rs | 2 +- tests/aws/aws_kubernetes_ec2.rs | 8 +++----- 3 files changed, 8 
insertions(+), 7 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 2703507e..67b3d7b9 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -350,7 +350,10 @@ fn tera_context( &managed_dns_resolvers_terraform_format, ); - context.insert("wildcard_managed_dns", &self.dns_provider().domain().wildcarded().to_string()); + context.insert( + "wildcard_managed_dns", + &kubernetes.dns_provider().domain().wildcarded().to_string(), + ); match kubernetes.dns_provider().kind() { dns_provider::Kind::Cloudflare => { diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 1db59b39..2f15f74c 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -31,7 +31,7 @@ use crate::cmd::helm::{to_engine_error, Helm}; use crate::cmd::kubectl::{ do_kubectl_exec_get_loadbalancer_id, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, }; -use crate::cmd::terraform::terraform_init_validate_plan_apply; +use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; diff --git a/tests/aws/aws_kubernetes_ec2.rs b/tests/aws/aws_kubernetes_ec2.rs index 6985e106..f8554dce 100644 --- a/tests/aws/aws_kubernetes_ec2.rs +++ b/tests/aws/aws_kubernetes_ec2.rs @@ -24,21 +24,19 @@ fn create_and_destroy_aws_ec2_k3s_cluster( engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); let zones = region.get_zones(); + let cluster_id = generate_cluster_id(region.to_string().as_str()); cluster_test( test_name, Kind::Aws, KKind::Ec2, - context( - 
generate_id().as_str(), - generate_cluster_id(region.to_string().as_str()).as_str(), - ), + context(generate_id().as_str(), cluster_id.as_str()), logger(), region.to_aws_format().as_str(), Some(zones), test_type, major_boot_version, minor_boot_version, - &ClusterDomain::Default, + &ClusterDomain::Default { cluster_id }, Option::from(vpc_network_mode), None, ) From 57c131bb548acf26197b65119170bb9b097e4258 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Wed, 4 May 2022 11:12:51 +0200 Subject: [PATCH 105/122] fix: test compil issue and linter --- .../aws/kubernetes/ec2_helm_charts.rs | 6 +-- src/cloud_provider/aws/kubernetes/mod.rs | 51 +++++++++---------- src/cloud_provider/utilities.rs | 2 +- tests/aws/aws_kubernetes.rs | 7 ++- 4 files changed, 30 insertions(+), 36 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs index bb5be4dd..2756894c 100644 --- a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs @@ -85,12 +85,12 @@ pub fn ec2_aws_helm_charts( qovery_terraform_config_file: &str, chart_config_prerequisites: &Ec2ChartsConfigPrerequisites, chart_prefix_path: Option<&str>, - kubernetes_config: &Path, - envs: &[(String, String)], + _kubernetes_config: &Path, + _envs: &[(String, String)], ) -> Result>>, CommandError> { let chart_prefix = chart_prefix_path.unwrap_or("./"); let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) }; - let qovery_terraform_config = get_aws_ec2_qovery_terraform_config(qovery_terraform_config_file)?; + let _qovery_terraform_config = get_aws_ec2_qovery_terraform_config(qovery_terraform_config_file)?; // Qovery storage class let q_storage_class = CommonChart { diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 67b3d7b9..89c1ab06 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs 
@@ -622,37 +622,32 @@ fn create( } // wait for AWS EC2 K3S port is open to avoid later deployment issues (and kubeconfig not available on S3) - match kubernetes.kind() { - Kind::Ec2 => { - let qovery_teraform_config = - get_aws_ec2_qovery_terraform_config(format!("{}/qovery-tf-config.json", &temp_dir).as_str()) - .map_err(|e| EngineError::new_terraform_qovery_config_mismatch(event_details.clone(), e))?; + if let Kind::Ec2 = kubernetes.kind() { + let qovery_teraform_config = + get_aws_ec2_qovery_terraform_config(format!("{}/qovery-tf-config.json", &temp_dir).as_str()) + .map_err(|e| EngineError::new_terraform_qovery_config_mismatch(event_details.clone(), e))?; - let port = qovery_teraform_config.kubernetes_port_to_u16().map_err(|e| { - EngineError::new_terraform_qovery_config_mismatch( - event_details.clone(), - CommandError::new_from_safe_message(e), - ) - })?; - - wait_until_port_is_open( - &TcpCheckSource::DnsName(qovery_teraform_config.aws_ec2_public_hostname.as_str()), - port, - 300, - kubernetes.logger(), + let port = qovery_teraform_config.kubernetes_port_to_u16().map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( event_details.clone(), + CommandError::new_from_safe_message(e), ) - .map_err(|e| { - EngineError::new_terraform_qovery_config_mismatch( - event_details.clone(), - CommandError::new(format!( - "Wasn't able to connect to Kubernetes API, can't continue. Did you manually performed changes AWS side?" - ), Some(format!("{:?}", e)), None), - ) - })?; - } - _ => {} - } + })?; + + wait_until_port_is_open( + &TcpCheckSource::DnsName(qovery_teraform_config.aws_ec2_public_hostname.as_str()), + port, + 300, + kubernetes.logger(), + event_details.clone(), + ) + .map_err(|e| { + EngineError::new_terraform_qovery_config_mismatch( + event_details.clone(), + CommandError::new("Wasn't able to connect to Kubernetes API, can't continue. 
Did you manually performed changes AWS side?".to_string(), Some(format!("{:?}", e)), None), + ) + })?; + }; // kubernetes helm deployments on the cluster let kubeconfig_path = kubernetes.get_kubeconfig_file_path()?; diff --git a/src/cloud_provider/utilities.rs b/src/cloud_provider/utilities.rs index 9a7b49f7..7be905ba 100644 --- a/src/cloud_provider/utilities.rs +++ b/src/cloud_provider/utilities.rs @@ -272,7 +272,7 @@ pub fn check_tcp_port_is_open(address: &TcpCheckSource, port: u16) -> Result<(), let timeout = core::time::Duration::from_secs(1); let ip = match address { - TcpCheckSource::SocketAddr(x) => x.clone(), + TcpCheckSource::SocketAddr(x) => *x, TcpCheckSource::DnsName(x) => { let address = format!("{}:{}", x, port); match address.to_socket_addrs() { diff --git a/tests/aws/aws_kubernetes.rs b/tests/aws/aws_kubernetes.rs index dbf4a136..3d5da4bb 100644 --- a/tests/aws/aws_kubernetes.rs +++ b/tests/aws/aws_kubernetes.rs @@ -3,6 +3,7 @@ extern crate test_utilities; use std::str::FromStr; use ::function_name::named; +use test_utilities::aws::{AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION}; use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType}; use test_utilities::utilities::{context, engine_run_test, generate_cluster_id, generate_id, logger}; @@ -23,15 +24,13 @@ fn create_and_destroy_eks_cluster( ) { engine_run_test(|| { let region = AwsRegion::from_str(region.as_str()).expect("Wasn't able to convert the desired region"); + let cluster_id = generate_cluster_id(region.to_string().as_str()); let zones = region.get_zones(); cluster_test( test_name, Kind::Aws, KKind::Eks, - context( - generate_id().as_str(), - generate_cluster_id(region.to_string().as_str()).as_str(), - ), + context(generate_id().as_str(), cluster_id.as_str()), logger(), region.to_aws_format().as_str(), Some(zones), From d589dd420f45b52fff058e3a2fbb6ed9bdc2ad52 Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 4 May 2022 17:17:00 +0200 Subject: [PATCH 106/122] 
chore(ENG_927): use official mysql chart (#704) --- .../charts/kube-prometheus-stack/values.yaml | 2 +- lib/common/services/mysql/Chart.lock | 6 + lib/common/services/mysql/Chart.yaml | 23 +- lib/common/services/mysql/README.md | 560 ++++-- .../services/mysql/charts/common/.helmignore | 22 + .../services/mysql/charts/common/Chart.yaml | 23 + .../services/mysql/charts/common/README.md | 347 ++++ .../charts/common/templates/_affinities.tpl | 102 ++ .../charts/common/templates/_capabilities.tpl | 139 ++ .../mysql/charts/common/templates/_errors.tpl | 23 + .../mysql/charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../mysql/charts/common/templates/_labels.tpl | 18 + .../mysql/charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../mysql/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 ++ .../common/templates/validations/_mongodb.tpl | 108 ++ .../templates/validations/_postgresql.tpl | 129 ++ .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../services/mysql/charts/common/values.yaml | 5 + .../mysql/ci/values-production-with-rbac.yaml | 30 + .../services/mysql/ci/values-production.yaml | 29 - .../docker-entrypoint-initdb.d/README.md | 3 - .../services/mysql/templates/_helpers.tpl | 441 ++--- .../services/mysql/templates/extra-list.yaml | 4 + .../templates/initialization-configmap.yaml | 22 - .../mysql/templates/master-configmap.yaml | 11 - .../mysql/templates/master-statefulset.yaml | 293 ---- .../services/mysql/templates/master-svc.yaml | 38 - .../services/mysql/templates/metrics-svc.yaml | 29 + .../mysql/templates/networkpolicy.yaml | 38 + .../mysql/templates/primary/configmap.yaml | 18 + 
.../primary/initialization-configmap.yaml | 14 + .../services/mysql/templates/primary/pdb.yaml | 25 + .../mysql/templates/primary/statefulset.yaml | 368 ++++ .../mysql/templates/primary/svc-headless.yaml | 24 + .../services/mysql/templates/primary/svc.yaml | 41 + lib/common/services/mysql/templates/role.yaml | 21 + .../services/mysql/templates/rolebinding.yaml | 21 + .../mysql/templates/secondary/configmap.yaml | 18 + .../mysql/templates/secondary/pdb.yaml | 25 + .../templates/secondary/statefulset.yaml | 338 ++++ .../templates/secondary/svc-headless.yaml | 26 + .../mysql/templates/secondary/svc.yaml | 43 + .../services/mysql/templates/secrets.yaml | 41 +- .../mysql/templates/serviceaccount.yaml | 23 +- .../mysql/templates/servicemonitor.yaml | 28 +- .../mysql/templates/slave-configmap.yaml | 11 - .../mysql/templates/slave-statefulset.yaml | 262 --- .../services/mysql/templates/slave-svc.yaml | 40 - .../services/mysql/values-production.yaml | 304 ---- lib/common/services/mysql/values.schema.json | 178 ++ lib/common/services/mysql/values.yaml | 1501 +++++++++++------ lib/helm-freeze.yaml | 3 +- test_utilities/src/common.rs | 265 +++ tests/aws/aws_databases.rs | 43 +- 62 files changed, 4830 insertions(+), 2051 deletions(-) create mode 100644 lib/common/services/mysql/Chart.lock create mode 100644 lib/common/services/mysql/charts/common/.helmignore create mode 100644 lib/common/services/mysql/charts/common/Chart.yaml create mode 100644 lib/common/services/mysql/charts/common/README.md create mode 100644 lib/common/services/mysql/charts/common/templates/_affinities.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_capabilities.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_errors.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_images.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_ingress.tpl create mode 100644 
lib/common/services/mysql/charts/common/templates/_labels.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_names.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_secrets.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_storage.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_tplvalues.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_utils.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/_warnings.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_redis.tpl create mode 100644 lib/common/services/mysql/charts/common/templates/validations/_validations.tpl create mode 100644 lib/common/services/mysql/charts/common/values.yaml create mode 100644 lib/common/services/mysql/ci/values-production-with-rbac.yaml delete mode 100644 lib/common/services/mysql/ci/values-production.yaml delete mode 100644 lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md create mode 100644 lib/common/services/mysql/templates/extra-list.yaml delete mode 100644 lib/common/services/mysql/templates/initialization-configmap.yaml delete mode 100644 lib/common/services/mysql/templates/master-configmap.yaml delete mode 100644 lib/common/services/mysql/templates/master-statefulset.yaml delete mode 100644 lib/common/services/mysql/templates/master-svc.yaml create mode 100644 lib/common/services/mysql/templates/metrics-svc.yaml create mode 100644 lib/common/services/mysql/templates/networkpolicy.yaml create mode 100644 
lib/common/services/mysql/templates/primary/configmap.yaml create mode 100644 lib/common/services/mysql/templates/primary/initialization-configmap.yaml create mode 100644 lib/common/services/mysql/templates/primary/pdb.yaml create mode 100644 lib/common/services/mysql/templates/primary/statefulset.yaml create mode 100644 lib/common/services/mysql/templates/primary/svc-headless.yaml create mode 100644 lib/common/services/mysql/templates/primary/svc.yaml create mode 100644 lib/common/services/mysql/templates/role.yaml create mode 100644 lib/common/services/mysql/templates/rolebinding.yaml create mode 100644 lib/common/services/mysql/templates/secondary/configmap.yaml create mode 100644 lib/common/services/mysql/templates/secondary/pdb.yaml create mode 100644 lib/common/services/mysql/templates/secondary/statefulset.yaml create mode 100644 lib/common/services/mysql/templates/secondary/svc-headless.yaml create mode 100644 lib/common/services/mysql/templates/secondary/svc.yaml delete mode 100644 lib/common/services/mysql/templates/slave-configmap.yaml delete mode 100644 lib/common/services/mysql/templates/slave-statefulset.yaml delete mode 100644 lib/common/services/mysql/templates/slave-svc.yaml delete mode 100644 lib/common/services/mysql/values-production.yaml create mode 100644 lib/common/services/mysql/values.schema.json diff --git a/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml b/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml index 07483cf9..73df2195 100644 --- a/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml +++ b/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml @@ -1377,7 +1377,7 @@ prometheusOperator: # Use certmanager to generate webhook certs certManager: - enabled: true + enabled: false # issuerRef: # name: "issuer" # kind: "ClusterIssuer" diff --git a/lib/common/services/mysql/Chart.lock b/lib/common/services/mysql/Chart.lock new file mode 100644 index 00000000..eb4df7fb --- /dev/null +++ 
b/lib/common/services/mysql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.1 +digest: sha256:1056dac8da880ed967a191e8d9eaf04766f77bda66a5715456d5dd4494a4a942 +generated: "2022-04-26T23:27:43.795807925Z" diff --git a/lib/common/services/mysql/Chart.yaml b/lib/common/services/mysql/Chart.yaml index 799cdc8d..f42519b3 100644 --- a/lib/common/services/mysql/Chart.yaml +++ b/lib/common/services/mysql/Chart.yaml @@ -1,19 +1,28 @@ -apiVersion: v1 -appVersion: 8.0.20 -description: Chart to create a Highly available MySQL cluster -engine: gotpl -home: https://mysql.com +annotations: + category: Database +apiVersion: v2 +appVersion: 8.0.29 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: MySQL is a fast, reliable, scalable, and easy to use open source relational + database system. Designed to handle mission-critical, heavy-load production applications. +home: https://github.com/bitnami/charts/tree/master/bitnami/mysql icon: https://bitnami.com/assets/stacks/mysql/img/mysql-stack-220x234.png keywords: - mysql - database - sql - cluster -- high availablity +- high availability maintainers: - email: containers@bitnami.com name: Bitnami name: mysql sources: - https://github.com/bitnami/bitnami-docker-mysql -version: 6.14.2 +- https://mysql.com +version: 8.9.6 diff --git a/lib/common/services/mysql/README.md b/lib/common/services/mysql/README.md index 273102fb..e9618274 100644 --- a/lib/common/services/mysql/README.md +++ b/lib/common/services/mysql/README.md @@ -1,8 +1,14 @@ -# MySQL + -[MySQL](https://mysql.com) is a fast, reliable, scalable, and easy to use open-source relational database system. MySQL Server is intended for mission-critical, heavy-load production systems as well as for embedding into mass-deployed software. 
+# MySQL packaged by Bitnami -## TL;DR; +MySQL is a fast, reliable, scalable, and easy to use open source relational database system. Designed to handle mission-critical, heavy-load production applications. + +[Overview of MySQL](http://www.mysql.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR ```bash $ helm repo add bitnami https://charts.bitnami.com/bitnami @@ -11,14 +17,14 @@ $ helm install my-release bitnami/mysql ## Introduction -This chart bootstraps a [MySQL](https://github.com/bitnami/bitnami-docker-mysql) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +This chart bootstraps a [MySQL](https://github.com/bitnami/bitnami-docker-mysql) replication cluster deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. ## Prerequisites -- Kubernetes 1.12+ -- Helm 2.12+ or Helm 3.0-beta3+ +- Kubernetes 1.19+ +- Helm 3.2.0+ - PV provisioner support in the underlying infrastructure ## Installing the Chart @@ -46,141 +52,292 @@ The command removes all the Kubernetes components associated with the chart and ## Parameters -The following tables lists the configurable parameters of the MySQL chart and their default values. 
-| Parameter | Description | Default | | -|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `global.imageRegistry` | Global Docker image registry | `nil` | | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | | -| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | | -| `image.registry` | MySQL image registry | `docker.io` | | -| `image.repository` | MySQL Image name | `bitnami/mysql` | | -| `image.tag` | MySQL Image tag | `{TAG_NAME}` | | -| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | | -| `image.debug` | Specify if debug logs should be enabled | `false` | | -| `nameOverride` | String to partially override mysql.fullname template with a string (will prepend the release name) | `nil` | | -| `fullnameOverride` | String to fully override mysql.fullname template with a string | `nil` | | -| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | | -| `volumePermissions.image.registry` | Init container volume-permissions 
image registry | `docker.io` | | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | | -| `volumePermissions.resources` | Init container resource requests/limit | `nil` | | -| `existingSecret` | Specify the name of an existing secret for password details (`root.password`, `db.password`, `replication.password` will be ignored and picked up from this secret). The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password`. | `nil` | | -| `root.password` | Password for the `root` user | _random 10 character alphanumeric string_ | | -| `root.forcePassword` | Force users to specify a password. That is required for 'helm upgrade' to work properly | `false` | | -| `root.injectSecretsAsVolume` | Mount admin user password as a file instead of using an environment variable | `false` | | -| `db.user` | Username of new user to create (should be different from replication.user) | `nil` | | -| `db.password` | Password for the new user | _random 10 character alphanumeric string if `db.user` is defined_ | | -| `db.name` | Name for new database to create | `my_database` | | -| `db.forcePassword` | Force users to specify a password. That is required for 'helm upgrade' to work properly | `false` | | -| `db.injectSecretsAsVolume` | Mount user password as a file instead of using an environment variable | `false` | | -| `replication.enabled` | MySQL replication enabled | `true` | | -| `replication.user` | MySQL replication user (should be different from db.user) | `replicator` | | -| `replication.password` | MySQL replication user password | _random 10 character alphanumeric string_ | | -| `replication.forcePassword` | Force users to specify a password. 
That is required for 'helm upgrade' to work properly | `false` | | -| `replication.injectSecretsAsVolume` | Mount user password as a file instead of using an environment variable | `false` | | -| `initdbScripts` | Dictionary of initdb scripts | `nil` | | -| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` | | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | | -| `serviceAccount.name` | If serviceAccount.create is enabled, what should the serviceAccount name be - otherwise defaults to the fullname | `nil` | | -| `master.config` | Config file for the MySQL Master server | `_default values in the values.yaml file_` | | -| `master.updateStrategy.type` | Master statefulset update strategy policy | `RollingUpdate` | | -| `master.podAnnotations` | Pod annotations for master nodes | `{}` | | -| `master.affinity` | Map of node/pod affinities for master nodes | `{}` (The value is evaluated as a template) | | -| `master.nodeSelector` | Node labels for pod assignment on master nodes | `{}` (The value is evaluated as a template) | | -| `master.tolerations` | Tolerations for pod assignment on master nodes | `[]` (The value is evaluated as a template) | | -| `master.securityContext.enabled` | Enable security context for master nodes | `true` | | -| `master.securityContext.fsGroup` | Group ID for the master nodes' containers | `1001` | | -| `master.securityContext.runAsUser` | User ID for the master nodes' containers | `1001` | | -| `master.containerSecurityContext` | Container security context for master nodes' containers | `{}` | | -| `master.resources` | CPU/Memory resource requests/limits for master nodes' containers | `{}` | | -| `master.livenessProbe.enabled` | Turn on and off liveness probe (master nodes) | `true` | | -| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master nodes) | `120` | | -| `master.livenessProbe.periodSeconds` | How 
often to perform the probe (master nodes) | `10` | | -| `master.livenessProbe.timeoutSeconds` | When the probe times out (master nodes) | `1` | | -| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (master nodes) | `1` | | -| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (master nodes) | `3` | | -| `master.readinessProbe.enabled` | Turn on and off readiness probe (master nodes) | `true` | | -| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master nodes) | `30` | | -| `master.readinessProbe.periodSeconds` | How often to perform the probe (master nodes) | `10` | | -| `master.readinessProbe.timeoutSeconds` | When the probe times out (master nodes) | `1` | | -| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (master nodes) | `1` | | -| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (master nodes) | `3` | | -| `master.extraEnvVars` | Array containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.extraEnvVarsCM` | Configmap containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.extraEnvVarsSecret` | Secret containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` (master nodes) | `true` | | -| `master.persistence.mountPath` | Configure `PersistentVolumeClaim` mount path (master nodes) | `/bitnami/mysql` | | -| `master.persistence.annotations` | Persistent Volume Claim annotations (master nodes) | `{}` | | -| `master.persistence.storageClass` | Persistent Volume Storage Class (master nodes) | `` | | -| `master.persistence.accessModes` | Persistent Volume Access Modes (master nodes) | `[ReadWriteOnce]` | | -| `master.persistence.size` | Persistent Volume Size (master nodes) | `8Gi` | | -| `master.persistence.existingClaim` | 
Provide an existing `PersistentVolumeClaim` (master nodes) | `nil` | | -| `slave.replicas` | Desired number of slave replicas | `1` | | -| `slave.updateStrategy.type` | Slave statefulset update strategy policy | `RollingUpdate` | | -| `slave.podAnnotations` | Pod annotations for slave nodes | `{}` | | -| `slave.affinity` | Map of node/pod affinities for slave nodes | `{}` (The value is evaluated as a template) | | -| `slave.nodeSelector` | Node labels for pod assignment on slave nodes | `{}` (The value is evaluated as a template) | | -| `slave.tolerations` | Tolerations for pod assignment on slave nodes | `[]` (The value is evaluated as a template) | | -| `slave.extraEnvVars` | Array containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.extraEnvVarsCM` | ConfigMap containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.extraEnvVarsSecret` | Secret containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.securityContext.enabled` | Enable security context for slave nodes | `true` | | -| `slave.securityContext.fsGroup` | Group ID for the slave nodes' containers | `1001` | | -| `slave.securityContext.runAsUser` | User ID for the slave nodes' containers | `1001` | | -| `slave.containerSecurityContext` | Container security context for slave nodes' containers | `{}` | | -| `slave.resources` | CPU/Memory resource requests/limits for slave nodes' containers | `{}` | | -| `slave.livenessProbe.enabled` | Turn on and off liveness probe (slave nodes) | `true` | | -| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (slave nodes) | `120` | | -| `slave.livenessProbe.periodSeconds` | How often to perform the probe (slave nodes) | `10` | | -| `slave.livenessProbe.timeoutSeconds` | When the probe times out (slave nodes) | `1` | | -| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (slave nodes) | `1` | | -| 
`slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave nodes) | `3` | | -| `slave.readinessProbe.enabled` | Turn on and off readiness probe (slave nodes) | `true` | | -| `slave.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (slave nodes) | `30` | | -| `slave.readinessProbe.periodSeconds` | How often to perform the probe (slave nodes) | `10` | | -| `slave.readinessProbe.timeoutSeconds` | When the probe times out (slave nodes) | `1` | | -| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (slave nodes) | `1` | | -| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave nodes) | `3` | | -| `slave.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` (slave nodes) | `true` | | -| `slave.persistence.mountPath` | Configure `PersistentVolumeClaim` mount path (slave nodes) | `/bitnami/mysql` | | -| `slave.persistence.annotations` | Persistent Volume Claim annotations (slave nodes) | `{}` | | -| `slave.persistence.storageClass` | Persistent Volume Storage Class (slave nodes) | `` | | -| `slave.persistence.accessModes` | Persistent Volume Access Modes (slave nodes) | `[ReadWriteOnce]` | | -| `slave.persistence.size` | Persistent Volume Size (slave nodes) | `8Gi` | | -| `slave.persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (slave nodes) | `nil` | | -| `service.type` | Kubernetes service type | `ClusterIP` | | -| `service.port` | MySQL service port | `3306` | | -| `service.nodePort.master` | Port to bind to for NodePort service type (master service) | `nil` | | -| `service.nodePort.slave` | Port to bind to for NodePort service type (slave service) | `nil` | | -| `service.loadBalancerIP.master` | Static IP Address to use for master LoadBalancer service type | `nil` | | -| `service.loadBalancerIP.slave` | Static IP Address to use for slaves LoadBalancer service type | `nil` | | -| 
`service.annotations` | Kubernetes service annotations | `{}` | | -| `metrics.enabled` | Start a side-car prometheus exporter | `false` | | -| `metrics.image` | Exporter image name | `bitnami/mysqld-exporter` | | -| `metrics.imageTag` | Exporter image tag | `{TAG_NAME}` | | -| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` | | -| `metrics.resources` | Exporter resource requests/limit | `nil` | | -| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | | -| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | | -| `metrics.service.annotations` | Prometheus exporter svc annotations | `{prometheus.io/scrape: "true", prometheus.io/port: "9104"}` | | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | | -| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | | -| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | | -| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `nil` | The above parameters map to the env variables defined in [bitnami/mysql](http://github.com/bitnami/bitnami-docker-mysql). For more information please refer to the [bitnami/mysql](http://github.com/bitnami/bitnami-docker-mysql) image documentation. 
| +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Cluster domain | `cluster.local` | +| `commonAnnotations` | Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `commonLabels` | Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `extraDeploy` | Array with extra yaml to deploy with the chart. Evaluated as a template | `[]` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MySQL common parameters + +| Name | Description | Value | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | MySQL image registry | `docker.io` | +| `image.repository` | MySQL image repository | `bitnami/mysql` | +| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.29-debian-10-r0` | +| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `architecture` | MySQL architecture (`standalone` or `replication`) | `standalone` | +| `auth.rootPassword` | Password for the `root` user. Ignored if existing secret is provided | `""` | +| `auth.database` | Name for a custom database to create | `my_database` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the new user. Ignored if existing secret is provided | `""` | +| `auth.replicationUser` | MySQL replication user | `replicator` | +| `auth.replicationPassword` | MySQL replication user password. Ignored if existing secret is provided | `""` | +| `auth.existingSecret` | Use existing secret for password details. 
The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` | `""` | +| `auth.forcePassword` | Force users to specify required passwords | `false` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `auth.customPasswordFiles` | Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` | `{}` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | + + +### MySQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ------------------- | +| `primary.command` | Override default container command on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.args` | Override default container args on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.hostAliases` | Deployment pod host aliases | `[]` | +| `primary.configuration` | Configure MySQL Primary with a custom my.cnf file | `""` | +| `primary.existingConfigmap` | Name of existing ConfigMap with MySQL Primary configuration. | `""` | +| `primary.updateStrategy` | Update strategy type for the MySQL primary statefulset | `RollingUpdate` | +| `primary.rollingUpdatePartition` | Partition update strategy for MySQL Primary statefulset | `""` | +| `primary.podAnnotations` | Additional pod annotations for MySQL primary pods | `{}` | +| `primary.podAffinityPreset` | MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | MySQL primary pod anti-affinity preset. 
Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | MySQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | MySQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `primary.affinity` | Affinity for MySQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for MySQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for MySQL primary pods assignment | `[]` | +| `primary.podSecurityContext.enabled` | Enable security context for MySQL primary pods | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `primary.containerSecurityContext.enabled` | MySQL primary container securityContext | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the MySQL primary container | `1001` | +| `primary.resources.limits` | The resources limits for MySQL primary containers | `{}` | +| `primary.resources.requests` | The requested resources for MySQL primary containers | `{}` | +| `primary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe 
| `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe | `true` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Override default liveness probe for MySQL primary containers | `{}` | +| `primary.customReadinessProbe` | Override default readiness probe for MySQL primary containers | `{}` | +| `primary.customStartupProbe` | Override default startup probe for MySQL primary containers | `{}` | +| `primary.extraFlags` | MySQL primary additional command line flags | `""` | +| `primary.extraEnvVars` | Extra environment variables to be set on MySQL primary containers | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL primary containers | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL primary containers | `""` | +| `primary.persistence.enabled` | Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. 
If false, use emptyDir | `true` | +| `primary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MySQL primary replicas | `""` | +| `primary.persistence.storageClass` | MySQL primary persistent volume storage Class | `""` | +| `primary.persistence.annotations` | MySQL primary persistent volume claim annotations | `{}` | +| `primary.persistence.accessModes` | MySQL primary persistent volume access Modes | `["ReadWriteOnce"]` | +| `primary.persistence.size` | MySQL primary persistent volume size | `8Gi` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL Primary pod(s) | `[]` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) | `[]` | +| `primary.initContainers` | Add additional init containers for the MySQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers for the MySQL Primary pod(s) | `[]` | +| `primary.service.type` | MySQL Primary K8s service type | `ClusterIP` | +| `primary.service.port` | MySQL Primary K8s service port | `3306` | +| `primary.service.nodePort` | MySQL Primary K8s service node port | `""` | +| `primary.service.clusterIP` | MySQL Primary K8s service clusterIP IP | `""` | +| `primary.service.loadBalancerIP` | MySQL Primary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL Primary service is LoadBalancer | `[]` | +| `primary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `primary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL primary pods | `false` | +| `primary.pdb.minAvailable` | Minimum number/percentage of MySQL primary 
pods that should remain scheduled | `1` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of MySQL primary pods that may be made unavailable | `""` | +| `primary.podLabels` | MySQL Primary pod label. If labels are same as commonLabels , this will take precedence | `{}` | + + +### MySQL Secondary parameters + +| Name | Description | Value | +| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `secondary.replicaCount` | Number of MySQL secondary replicas | `1` | +| `secondary.hostAliases` | Deployment pod host aliases | `[]` | +| `secondary.command` | Override default container command on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.args` | Override default container args on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.configuration` | Configure MySQL Secondary with a custom my.cnf file | `""` | +| `secondary.existingConfigmap` | Name of existing ConfigMap with MySQL Secondary configuration. | `""` | +| `secondary.updateStrategy` | Update strategy type for the MySQL secondary statefulset | `RollingUpdate` | +| `secondary.rollingUpdatePartition` | Partition update strategy for MySQL Secondary statefulset | `""` | +| `secondary.podAnnotations` | Additional pod annotations for MySQL secondary pods | `{}` | +| `secondary.podAffinityPreset` | MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.podAntiAffinityPreset` | MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `secondary.nodeAffinityPreset.type` | MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `secondary.nodeAffinityPreset.key` | MySQL secondary node label key to match Ignored if `secondary.affinity` is set. | `""` | +| `secondary.nodeAffinityPreset.values` | MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. | `[]` | +| `secondary.affinity` | Affinity for MySQL secondary pods assignment | `{}` | +| `secondary.nodeSelector` | Node labels for MySQL secondary pods assignment | `{}` | +| `secondary.tolerations` | Tolerations for MySQL secondary pods assignment | `[]` | +| `secondary.podSecurityContext.enabled` | Enable security context for MySQL secondary pods | `true` | +| `secondary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `secondary.containerSecurityContext.enabled` | MySQL secondary container securityContext | `true` | +| `secondary.containerSecurityContext.runAsUser` | User ID for the MySQL secondary container | `1001` | +| `secondary.resources.limits` | The resources limits for MySQL secondary containers | `{}` | +| `secondary.resources.requests` | The requested resources for MySQL secondary containers | `{}` | +| `secondary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `secondary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `secondary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `secondary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `secondary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `secondary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `secondary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `secondary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `secondary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| 
`secondary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `secondary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `secondary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `secondary.startupProbe.enabled` | Enable startupProbe | `true` | +| `secondary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `secondary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `secondary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `secondary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `secondary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `secondary.customLivenessProbe` | Override default liveness probe for MySQL secondary containers | `{}` | +| `secondary.customReadinessProbe` | Override default readiness probe for MySQL secondary containers | `{}` | +| `secondary.customStartupProbe` | Override default startup probe for MySQL secondary containers | `{}` | +| `secondary.extraFlags` | MySQL secondary additional command line flags | `""` | +| `secondary.extraEnvVars` | An array to add extra environment variables on MySQL secondary containers | `[]` | +| `secondary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL secondary containers | `""` | +| `secondary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL secondary containers | `""` | +| `secondary.persistence.enabled` | Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` | `true` | +| `secondary.persistence.storageClass` | MySQL secondary persistent volume storage Class | `""` | +| `secondary.persistence.annotations` | MySQL secondary persistent volume claim annotations | `{}` | +| `secondary.persistence.accessModes` | MySQL secondary persistent volume access Modes | 
`["ReadWriteOnce"]` | +| `secondary.persistence.size` | MySQL secondary persistent volume size | `8Gi` | +| `secondary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `secondary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL secondary pod(s) | `[]` | +| `secondary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) | `[]` | +| `secondary.initContainers` | Add additional init containers for the MySQL secondary pod(s) | `[]` | +| `secondary.sidecars` | Add additional sidecar containers for the MySQL secondary pod(s) | `[]` | +| `secondary.service.type` | MySQL secondary Kubernetes service type | `ClusterIP` | +| `secondary.service.port` | MySQL secondary Kubernetes service port | `3306` | +| `secondary.service.nodePort` | MySQL secondary Kubernetes service node port | `""` | +| `secondary.service.clusterIP` | MySQL secondary Kubernetes service clusterIP IP | `""` | +| `secondary.service.loadBalancerIP` | MySQL secondary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `secondary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `secondary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL secondary service is LoadBalancer | `[]` | +| `secondary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `secondary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL secondary pods | `false` | +| `secondary.pdb.minAvailable` | Minimum number/percentage of MySQL secondary pods that should remain scheduled | `1` | +| `secondary.pdb.maxUnavailable` | Maximum number/percentage of MySQL secondary pods that may be made unavailable | `""` | +| `secondary.podLabels` | Additional pod labels for MySQL secondary pods | `{}` | + + +### RBAC parameters + +| Name | Description | Value | +| ---------------------------- | 
------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MySQL pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.annotations` | Annotations for MySQL Service Account | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Network Policy + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | The Policy model to apply. | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r408` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources` | Init container 
volume-permissions resources | `{}` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-10-r52` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | +| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | +| `metrics.service.annotations` | Prometheus exporter service annotations | `{}` | +| `metrics.extraArgs.primary` | Extra args to be passed to mysqld_exporter on Primary pods | `[]` | +| `metrics.extraArgs.secondary` | Extra args to be passed to mysqld_exporter on Secondary pods | `[]` | +| `metrics.resources.limits` | The resources limits for MySQL prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for MySQL prometheus exporter containers | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| 
`metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | + + +The above parameters map to the env variables defined in [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql). For more information please refer to the [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql) image documentation. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, ```bash $ helm install my-release \ - --set root.password=secretpassword,user.database=app_database \ + --set auth.rootPassword=secretpassword,auth.database=app_database \ bitnami/mysql ``` The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a database named `app_database`. +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```bash @@ -197,64 +354,115 @@ It is strongly recommended to use immutable tags in a production environment. Th Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. -### Production configuration +### Use a different MySQL version -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/change-image-version/). 
-- Force users to specify a password: -```diff -- root.forcePassword: false -+ root.forcePassword: true +### Customize a new MySQL instance -- db.forcePassword: false -+ db.forcePassword: true - -- replication.forcePassword: false -+ replication.forcePassword: true -``` - -- Desired number of slave replicas: -```diff -- slave.replicas: 1 -+ slave.replicas: 2 -``` - -- Start a side-car prometheus exporter: -```diff -- metrics.enabled: false -+ metrics.enabled: true -``` - -### Initialize a fresh instance - -The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. +The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image allows you to use your custom scripts to initialize a fresh instance. Custom scripts may be specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and the ConfigMap passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter. The allowed extensions are `.sh`, `.sql` and `.sql.gz`. +These scripts are treated differently depending on their extension. While `.sh` scripts are executed on all the nodes, `.sql` and `.sql.gz` scripts are only executed on the primary nodes. This is because `.sh` scripts support conditional tests to identify the type of node they are running on, while such tests are not supported in `.sql` or `.sql.gz` files. + +Refer to the [chart documentation for more information and a usage example](http://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/customize-new-instance/). 
+ +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as MySQL, you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + ## Persistence The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can be defined. +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. -### Adjust permissions of persistent volume mountpoint +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). -As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. +## Network Policy -By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. 
-As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. +To enable network policy for MySQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. -You can enable this initContainer by setting `volumePermissions.enabled` to `true`. +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 3306. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to MySQL. +This label will be displayed in the output of a successful install. + +## Pod affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). 
## Upgrading -It's necessary to set the `root.password` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart: +It's necessary to set the `auth.rootPassword` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart: ```bash -$ helm upgrade my-release bitnami/mysql --set root.password=[ROOT_PASSWORD] +$ helm upgrade my-release bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] ``` -| Note: you need to substitue the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes. +| Note: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes. + +### To 8.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - The terms *master* and *slave* have been replaced by the terms *primary* and *secondary*. Therefore, parameters prefixed with `master` or `slave` are now prefixed with `primary` or `secondary`, respectively. + - Credentials parameters are reorganized under the `auth` parameter. + - `replication.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`. +- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels). +- This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. 
More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MySQL chart, and migrate the data from your previous release. You have 2 alternatives to do so: + - Create a backup of the database, and restore it on the new release using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html). + - Reuse the PVC used to hold the master data on your previous release. To do so, use the `primary.persistence.existingClaim` parameter. The following example assumes that the release name is `mysql`: + +```bash +$ helm install mysql bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] --set primary.persistence.existingClaim=[EXISTING_PVC] +``` + +| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[ROOT_PASSWORD]_ with the root password used in your previous release. + +### To 7.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mysql/administration/upgrade-helm3/). ### To 3.0.0 @@ -265,3 +473,19 @@ Use the workaround below to upgrade from versions previous to 3.0.0. 
The followi $ kubectl delete statefulset mysql-master --cascade=false $ kubectl delete statefulset mysql-slave --cascade=false ``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/lib/common/services/mysql/charts/common/.helmignore b/lib/common/services/mysql/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/common/services/mysql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/common/services/mysql/charts/common/Chart.yaml b/lib/common/services/mysql/charts/common/Chart.yaml new file mode 100644 index 00000000..e8d2db9d --- /dev/null +++ b/lib/common/services/mysql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.13.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.13.1 diff --git a/lib/common/services/mysql/charts/common/README.md b/lib/common/services/mysql/charts/common/README.md new file mode 100644 index 00000000..88d13b1d --- /dev/null +++ b/lib/common/services/mysql/charts/common/README.md @@ -0,0 +1,347 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. 
| `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and 
`svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|--------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. 
| `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. 
| +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+ type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. 
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts

**Considerations when upgrading to this version**

- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3

**Useful links**

- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
- https://helm.sh/docs/topics/v2_v3_migration/
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/

## License

Copyright © 2022 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. 
diff --git a/lib/common/services/mysql/charts/common/templates/_affinities.tpl b/lib/common/services/mysql/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..189ea403 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_capabilities.tpl b/lib/common/services/mysql/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..4ec8321e --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. 
+*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_errors.tpl b/lib/common/services/mysql/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..a79cc2e3 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+

Usage:
{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}

Required password params:
  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
  - context - Context - Required. Parent context.
*/}}
{{- define "common.errors.upgrade.passwords.empty" -}}
  {{- $validationErrors := join "" .validationErrors -}}
  {{- if and $validationErrors .context.Release.IsUpgrade -}}
    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_images.tpl b/lib/common/services/mysql/charts/common/templates/_images.tpl new file mode 100644 index 00000000..42ffbc72 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_ingress.tpl b/lib/common/services/mysql/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..8caf73a6 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_labels.tpl b/lib/common/services/mysql/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..252066c7 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_names.tpl b/lib/common/services/mysql/charts/common/templates/_names.tpl new file mode 100644 index 00000000..c8574d17 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/lib/common/services/mysql/charts/common/templates/_secrets.tpl b/lib/common/services/mysql/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a53fb44f --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = 
regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_storage.tpl b/lib/common/services/mysql/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..60e2a844 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl b/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..2db16685 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl 
@@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_utils.tpl b/lib/common/services/mysql/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..ea083a24 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_warnings.tpl b/lib/common/services/mysql/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..ae10fa41 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl b/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..ded1ae3b --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl b/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..b6906ff7 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . 
-}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl b/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..a071ea4d --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. 
+ +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- 
$requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl b/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..164ec0d0 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl b/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..5d72959b --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl b/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..9a814cf4 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/values.yaml b/lib/common/services/mysql/charts/common/values.yaml new file mode 100644 index 00000000..f2df68e5 --- /dev/null +++ b/lib/common/services/mysql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/lib/common/services/mysql/ci/values-production-with-rbac.yaml b/lib/common/services/mysql/ci/values-production-with-rbac.yaml new file mode 100644 index 00000000..d3370c93 --- /dev/null +++ b/lib/common/services/mysql/ci/values-production-with-rbac.yaml @@ -0,0 +1,30 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct + +architecture: replication +auth: + usePasswordFiles: true + +primary: + extraEnvVars: + - name: TEST + value: "3" + podDisruptionBudget: + create: true + +secondary: + replicaCount: 2 + extraEnvVars: + - name: TEST + value: "2" + podDisruptionBudget: + create: true + +serviceAccount: + create: true + name: mysql-service-account +rbac: + create: true + +metrics: + enabled: true diff --git a/lib/common/services/mysql/ci/values-production.yaml b/lib/common/services/mysql/ci/values-production.yaml deleted file mode 100644 index 072fd062..00000000 --- a/lib/common/services/mysql/ci/values-production.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Test values file for generating all of the yaml and check that -# the rendering is correct - -volumePermissions: - enabled: true - -master: - extraEnvVars: - - name: TEST - value: "3" - - extraEnvVarsSecret: example-secret - extraEnvVarsCM: example-cm - -slave: - extraEnvVars: - - name: TEST - value: "2" - - extraEnvVarsSecret: example-secret-2 - extraEnvVarsCM: example-cm-2 - replicas: 2 - -metrics: - 
enabled: true - ## Kubeval doesn't recognise ServiceMonitor as a valid K8s object - # serviceMonitor: - # enabled: true - diff --git a/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md b/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md deleted file mode 100644 index c7257d74..00000000 --- a/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md +++ /dev/null @@ -1,3 +0,0 @@ -You can copy here your custom .sh, .sql or .sql.gz file so they are executed during the first boot of the image. - -More info in the [bitnami-docker-mysql](https://github.com/bitnami/bitnami-docker-mysql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/lib/common/services/mysql/templates/_helpers.tpl b/lib/common/services/mysql/templates/_helpers.tpl index bc5933ff..98b23466 100644 --- a/lib/common/services/mysql/templates/_helpers.tpl +++ b/lib/common/services/mysql/templates/_helpers.tpl @@ -1,97 +1,43 @@ {{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "mysql.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "mysql.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- define "mysql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} +{{- printf "%s-%s" (include "common.names.fullname" .) "primary" | trunc 63 | trimSuffix "-" -}} {{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} +{{- include "common.names.fullname" . 
-}} {{- end -}} {{- end -}} -{{- define "mysql.master.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- printf "%s-%s" .Values.fullnameOverride "master" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name "master" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name "master" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "mysql.slave.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- printf "%s-%s" .Values.fullnameOverride "slave" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name "slave" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name "slave" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "mysql.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "mysql.labels" -}} -app: {{ include "mysql.name" . }} -chart: {{ include "mysql.chart" . }} -release: {{ .Release.Name }} -heritage: {{ .Release.Service }} -{{- end -}} - -{{/* -Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector -*/}} -{{- define "mysql.matchLabels" -}} -app: {{ include "mysql.name" . }} -release: {{ .Release.Name }} +{{- define "mysql.secondary.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"secondary" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Return the proper MySQL image name */}} {{- define "mysql.image" -}} -{{- $registryName := .Values.image.registry -}} -{{- $repositoryName := .Values.image.repository -}} -{{- $tag := .Values.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} {{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mysql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mysql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mysql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} {{- end -}} {{ template "mysql.initdbScriptsCM" . }} @@ -100,216 +46,12 @@ Get the initialization scripts ConfigMap name. 
*/}} {{- define "mysql.initdbScriptsCM" -}} {{- if .Values.initdbScriptsConfigMap -}} -{{- printf "%s" .Values.initdbScriptsConfigMap -}} + {{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} {{- else -}} -{{- printf "%s-init-scripts" (include "mysql.master.fullname" .) -}} + {{- printf "%s-init-scripts" (include "mysql.primary.fullname" .) -}} {{- end -}} {{- end -}} -{{/* -Return the proper MySQL metrics exporter image name -*/}} -{{- define "mysql.metrics.image" -}} -{{- $registryName := .Values.metrics.image.registry -}} -{{- $repositoryName := .Values.metrics.image.repository -}} -{{- $tag := .Values.metrics.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "mysql.imagePullSecrets" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -Also, we can not use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . 
}} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "mysql.volumePermissions.image" -}} -{{- $registryName := .Values.volumePermissions.image.registry -}} -{{- $repositoryName := .Values.volumePermissions.image.repository -}} -{{- $tag := .Values.volumePermissions.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class for the master -*/}} -{{- define "mysql.master.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
-*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class for the slave -*/}} -{{- define "mysql.slave.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
-*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Renders a value that contains template. -Usage: -{{ include "mysql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "mysql.tplValue" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} - -{{/* -Compile all warnings into a single message, and call fail. -*/}} -{{- define "mysql.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "mysql.validateValues.loadBalancerIPareNotEquals" .) 
-}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} -{{- end -}} -{{- end -}} - -{{/* Validate values of MySql - must provide different IPs */}} -{{- define "mysql.validateValues.loadBalancerIPareNotEquals" -}} -{{- if not (empty .Values.service.loadBalancerIP) -}} -{{- if eq (.Values.service.loadBalancerIP.master | quote) (.Values.service.loadBalancerIP.slave | quote) }} -mysql: service.loadBalancerIP - loadBalancerIP.master is equal to loadBalancerIP.slave which is not possible. - Please set a different ip for master and slave services. -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* Check if there are rolling tags in the images */}} -{{- define "mysql.checkRollingTags" -}} -{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end -}} -{{- end -}} - - {{/* Returns the proper service account name depending if an explicit service account name is set in the values file. If the name is not set it will default to either mysql.fullname if serviceAccount.create @@ -317,15 +59,134 @@ WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.t */}} {{- define "mysql.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "mysql.fullname" .) .Values.serviceAccount.name }} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} {{/* -Returns chart secret name. 
If existingSecret is not set it will default to mysql.fullname +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.primary.createConfigmap" -}} +{{- if and .Values.primary.configuration (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.secondary.configmapName" -}} +{{- if .Values.secondary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.secondary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.secondary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.secondary.createConfigmap" -}} +{{- if and (eq .Values.architecture "replication") .Values.secondary.configuration (not .Values.secondary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MySQL credentials */}} {{- define "mysql.secretName" -}} -{{ default (include "mysql.fullname" .) .Values.existingSecret }} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MySQL +*/}} +{{- define "mysql.createSecret" -}} +{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "mysql.root.password" -}} + {{- if not (empty .Values.auth.rootPassword) }} + {{- .Values.auth.rootPassword }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-root-password") }} + {{- else }} + {{- required "A MySQL Root Password is required!" .Values.auth.rootPassword }} + {{- end }} +{{- end -}} + +{{- define "mysql.password" -}} + {{- if and (not (empty .Values.auth.username)) (not (empty .Values.auth.password)) }} + {{- .Values.auth.password }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-password") }} + {{- else }} + {{- required "A MySQL Database Password is required!" .Values.auth.password }} + {{- end }} +{{- end -}} + +{{- define "mysql.replication.password" -}} + {{- if not (empty .Values.auth.replicationPassword) }} + {{- .Values.auth.replicationPassword }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "mysql-replication-password") }} + {{- else }} + {{- required "A MySQL Replication Password is required!" .Values.auth.replicationPassword }} + {{- end }} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "mysql.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mysql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} {{- end -}} diff --git a/lib/common/services/mysql/templates/extra-list.yaml b/lib/common/services/mysql/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/lib/common/services/mysql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/lib/common/services/mysql/templates/initialization-configmap.yaml b/lib/common/services/mysql/templates/initialization-configmap.yaml deleted file mode 100644 index 6bf5689b..00000000 --- a/lib/common/services/mysql/templates/initialization-configmap.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.master.fullname" . }}-init-scripts - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz") (not .Values.initdbScriptsConfigMap) }} -binaryData: -{{- $root := . 
}} -{{- range $path, $bytes := .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} - {{ base $path }}: {{ $root.Files.Get $path | b64enc | quote }} -{{- end }} -{{- end }} -data: -{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}") (not .Values.initdbScriptsConfigMap) }} -{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}").AsConfig | indent 2 }} -{{- end }} -{{- with .Values.initdbScripts }} -{{ toYaml . | indent 2 }} -{{- end }} -{{- end }} diff --git a/lib/common/services/mysql/templates/master-configmap.yaml b/lib/common/services/mysql/templates/master-configmap.yaml deleted file mode 100644 index fdbbfe6f..00000000 --- a/lib/common/services/mysql/templates/master-configmap.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.master.config }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.master.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -data: - my.cnf: |- -{{ .Values.master.config | indent 4 }} -{{- end -}} diff --git a/lib/common/services/mysql/templates/master-statefulset.yaml b/lib/common/services/mysql/templates/master-statefulset.yaml deleted file mode 100644 index eb31d015..00000000 --- a/lib/common/services/mysql/templates/master-statefulset.yaml +++ /dev/null @@ -1,293 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "mysql.master.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -{{- if .Values.commonLabels }} - {{- toYaml .Values.commonLabels | nindent 4 }} -{{- end }} -spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: master - serviceName: {{ template "mysql.master.fullname" . }} - replicas: 1 - updateStrategy: - type: {{ .Values.master.updateStrategy.type }} - {{- if (eq "Recreate" .Values.master.updateStrategy.type) }} - rollingUpdate: null - {{- end }} - template: - metadata: - labels: {{- include "mysql.labels" . 
| nindent 8 }} - component: master -{{- if .Values.commonLabels }} - {{- toYaml .Values.commonLabels | nindent 8 }} -{{- end }} - {{- if .Values.master.podAnnotations }} - annotations: {{ include "mysql.tplValue" ( dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} - {{- end }} - spec: -{{- include "mysql.imagePullSecrets" . | indent 6 }} - {{- if .Values.master.affinity }} - affinity: {{- include "mysql.tplValue" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.nodeSelector }} - nodeSelector: {{- include "mysql.tplValue" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: {{- include "mysql.tplValue" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.master.securityContext.fsGroup }} - runAsUser: {{ .Values.master.securityContext.runAsUser }} - {{- end }} - serviceAccountName: {{ template "mysql.serviceAccountName" . }} - {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} - initContainers: - - name: volume-permissions - image: {{ template "mysql.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: - - /bin/bash - - -ec - - | - chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} {{ .Values.master.persistence.mountPath }} - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.master.persistence.mountPath }} - {{- end }} - containers: - - name: mysql - image: {{ template "mysql.image" . 
}} - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - {{- if .Values.master.containerSecurityContext }} - securityContext: {{- toYaml .Values.master.containerSecurityContext | nindent 12 }} - {{- end }} - env: - - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" .Values.image.debug | quote }} - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-root-password - {{- end }} - {{- if .Values.db.user }} - - name: MYSQL_USER - value: {{ .Values.db.user | quote }} - {{- if .Values.db.injectSecretsAsVolume }} - - name: MYSQL_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-password" - {{- else }} - - name: MYSQL_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-password - {{- end }} - {{- end }} - - name: MYSQL_DATABASE - value: {{ .Values.db.name | quote }} - {{- if .Values.replication.enabled }} - - name: MYSQL_REPLICATION_MODE - value: "master" - - name: MYSQL_REPLICATION_USER - value: {{ .Values.replication.user | quote }} - {{- if .Values.replication.injectSecretsAsVolume }} - - name: MYSQL_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-replication-password" - {{- else }} - - name: MYSQL_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-replication-password - {{- end }} - {{- end }} - {{- if .Values.master.extraEnvVars }} - {{- include "mysql.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} - envFrom: - {{- if .Values.master.extraEnvVarsCM }} - - configMapRef: - name: {{ .Values.master.extraEnvVarsCM }} - {{- end }} - {{- if .Values.master.extraEnvVarsSecret }} - - secretRef: - name: {{ .Values.master.extraEnvVarsSecret }} - {{- end }} - {{- end }} - ports: - - name: mysql - containerPort: 3306 - {{- if .Values.master.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.resources }} - resources: {{- toYaml 
.Values.master.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.master.persistence.mountPath }} - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - {{- end }} - {{- if .Values.master.config }} - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - mountPath: /opt/bitnami/mysql/secrets/ - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "mysql.metrics.image" . }} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - env: - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-root-password - {{- end }} - command: - - /bin/sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter - ports: - - name: metrics - containerPort: 9104 - livenessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 15 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 5 - timeoutSeconds: 1 - {{- if .Values.metrics.resources }} - resources: {{- toYaml .Values.metrics.resources | nindent 12 }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume }} - volumeMounts: - - name: mysql-credentials - mountPath: /opt/bitnami/mysqld-exporter/secrets/ - {{- end }} - {{- end }} - volumes: - {{- if .Values.master.config }} - - name: config - configMap: - name: {{ template "mysql.master.fullname" . }} - {{- end }} - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - configMap: - name: {{ template "mysql.initdbScriptsCM" . }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - secret: - secretName: {{ template "mysql.fullname" . 
}} - items: - {{- if .Values.db.injectSecretsAsVolume }} - - key: mysql-password - path: mysql-password - {{- end }} - {{- if .Values.root.injectSecretsAsVolume }} - - key: mysql-root-password - path: mysql-root-password - {{- end }} - {{- if .Values.replication.injectSecretsAsVolume }} - - key: mysql-replication-password - path: mysql-replication-password - {{- end }} - {{- end }} -{{- if not .Values.master.persistence.enabled }} - - name: "data" - emptyDir: {} -{{- else if and .Values.master.persistence.enabled .Values.master.persistence.existingClaim }} - - name: "data" - persistentVolumeClaim: - claimName: {{ .Values.master.persistence.existingClaim }} -{{- else if and .Values.master.persistence.enabled (not .Values.master.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - labels: - app: {{ template "mysql.name" . }} - component: master - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.master.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.master.persistence.size | quote }} - {{ include "mysql.master.storageClass" . }} -{{- end }} diff --git a/lib/common/services/mysql/templates/master-svc.yaml b/lib/common/services/mysql/templates/master-svc.yaml deleted file mode 100644 index 790f2df5..00000000 --- a/lib/common/services/mysql/templates/master-svc.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.service.name }} - labels: {{- include "mysql.labels" . 
| nindent 4 }} - component: master - {{- if or .Values.service.annotations .Values.metrics.service.annotations }} - annotations: - {{- if .Values.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.metrics.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} - {{- if not (empty .Values.service.loadBalancerIP.master) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP.master }} - {{- end }} - {{- end }} - ports: - - name: mysql - port: {{ .Values.service.port }} - targetPort: mysql - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} - nodePort: {{ .Values.service.nodePort.master }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - port: {{ .Values.metrics.service.port }} - targetPort: metrics - {{- end }} - selector: {{- include "mysql.matchLabels" . | nindent 4 }} - component: master diff --git a/lib/common/services/mysql/templates/metrics-svc.yaml b/lib/common/services/mysql/templates/metrics-svc.yaml new file mode 100644 index 00000000..fb0d9d76 --- /dev/null +++ b/lib/common/services/mysql/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + protocol: TCP + name: metrics + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} +{{- end }} diff --git a/lib/common/services/mysql/templates/networkpolicy.yaml b/lib/common/services/mysql/templates/networkpolicy.yaml new file mode 100644 index 00000000..a0d1d01d --- /dev/null +++ b/lib/common/services/mysql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.primary.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes + - ports: + - port: 9104 + {{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/primary/configmap.yaml b/lib/common/services/mysql/templates/primary/configmap.yaml new file mode 100644 index 00000000..540b7b90 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{ .Values.primary.configuration | nindent 4 }} +{{- end -}} diff --git a/lib/common/services/mysql/templates/primary/initialization-configmap.yaml b/lib/common/services/mysql/templates/primary/initialization-configmap.yaml new file mode 100644 index 00000000..83cbaea7 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mysql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{ end }} diff --git a/lib/common/services/mysql/templates/primary/pdb.yaml b/lib/common/services/mysql/templates/primary/pdb.yaml new file mode 100644 index 00000000..106ad520 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.primary.pdb.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.primary.pdb.minAvailable }} + minAvailable: {{ .Values.primary.pdb.minAvailable }} + {{- end }} + {{- if .Values.primary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.primary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/lib/common/services/mysql/templates/primary/statefulset.yaml b/lib/common/services/mysql/templates/primary/statefulset.yaml new file mode 100644 index 00000000..6f9c99ea --- /dev/null +++ b/lib/common/services/mysql/templates/primary/statefulset.yaml @@ -0,0 +1,368 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + serviceName: {{ include "mysql.primary.fullname" . }} + updateStrategy: + type: {{ .Values.primary.updateStrategy }} + {{- if (eq "Recreate" .Values.primary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.primary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.primary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mysql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mysql.serviceAccountName" . }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if 
.Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.primary.initContainers (and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled) }} + initContainers: + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /bitnami/mysql + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- end }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if not (empty .Values.auth.username) }} + - name: MYSQL_USER + value: {{ .Values.auth.username | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-password" .Values.auth.customPasswordFiles.user }} + {{- else }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-password + {{- end }} + {{- end }} + - name: MYSQL_DATABASE + value: {{ .Values.auth.database | quote }} + {{- if eq .Values.architecture "replication" }} + - name: MYSQL_REPLICATION_MODE + value: "master" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.primary.extraFlags }}" + {{- end }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.primary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 
}} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.primary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.startupProbe.enabled }} + startupProbe: {{- omit .Values.primary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{ toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts 
"context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.primary }} {{ . 
}} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "mysql.initdbScriptsCM" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ include "mysql.secretName" . 
}} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-password + path: mysql-password + {{- if eq .Values.architecture "replication" }} + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim . }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if and .Values.primary.persistence.enabled (not .Values.primary.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: primary + {{- if .Values.primary.persistence.annotations }} + annotations: + {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/lib/common/services/mysql/templates/primary/svc-headless.yaml b/lib/common/services/mysql/templates/primary/svc-headless.yaml new file mode 100644 index 00000000..49e6e579 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/svc-headless.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/lib/common/services/mysql/templates/primary/svc.yaml b/lib/common/services/mysql/templates/primary/svc.yaml new file mode 100644 index 00000000..b46e6faa --- /dev/null +++ b/lib/common/services/mysql/templates/primary/svc.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) .Values.primary.service.nodePort) }} + nodePort: {{ .Values.primary.service.nodePort }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: primary diff --git a/lib/common/services/mysql/templates/role.yaml b/lib/common/services/mysql/templates/role.yaml new file mode 100644 index 00000000..4cbdd5c9 --- /dev/null +++ b/lib/common/services/mysql/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- end }} diff --git a/lib/common/services/mysql/templates/rolebinding.yaml b/lib/common/services/mysql/templates/rolebinding.yaml new file mode 100644 index 00000000..90ede32f --- /dev/null +++ b/lib/common/services/mysql/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "mysql.serviceAccountName" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . -}} +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/configmap.yaml b/lib/common/services/mysql/templates/secondary/configmap.yaml new file mode 100644 index 00000000..682e3e19 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.secondary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{ .Values.secondary.configuration | nindent 4 }} +{{- end -}} diff --git a/lib/common/services/mysql/templates/secondary/pdb.yaml b/lib/common/services/mysql/templates/secondary/pdb.yaml new file mode 100644 index 00000000..49c7e167 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replication") .Values.secondary.pdb.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.secondary.pdb.minAvailable }} + minAvailable: {{ .Values.secondary.pdb.minAvailable }} + {{- end }} + {{- if .Values.secondary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.secondary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/statefulset.yaml b/lib/common/services/mysql/templates/secondary/statefulset.yaml new file mode 100644 index 00000000..ef196ebf --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/statefulset.yaml @@ -0,0 +1,338 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.secondary.replicaCount }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary + serviceName: {{ include "mysql.secondary.fullname" . }} + updateStrategy: + type: {{ .Values.secondary.updateStrategy }} + {{- if (eq "Recreate" .Values.secondary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.secondary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.secondary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.secondary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/secondary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.secondary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mysql.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.secondary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mysql.serviceAccountName" . }} + {{- if .Values.secondary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAntiAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.secondary.nodeAffinityPreset.type "key" .Values.secondary.nodeAffinityPreset.key "values" .Values.secondary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.secondary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.secondary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.secondary.initContainers (and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled) }} + initContainers: + {{- if .Values.secondary.initContainers }} + {{- include 
"common.tplvalues.render" (dict "value" .Values.secondary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }} /bitnami/mysql + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- end }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.secondary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.secondary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.secondary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MYSQL_REPLICATION_MODE + value: "slave" + 
- name: MYSQL_MASTER_HOST + value: {{ include "mysql.primary.fullname" . }} + - name: MYSQL_MASTER_PORT_NUMBER + value: {{ .Values.primary.service.port | quote }} + - name: MYSQL_MASTER_ROOT_USER + value: "root" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.secondary.extraFlags }}" + {{- end }} + {{- if .Values.secondary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.secondary.extraEnvVarsCM .Values.secondary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.secondary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.secondary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.secondary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.secondary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.secondary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.secondary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f 
"${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.secondary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.startupProbe.enabled }} + startupProbe: {{- omit .Values.secondary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.secondary.resources }} + resources: {{ toYaml .Values.secondary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles 
(not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.secondary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.secondary }} {{ . 
}} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.secondary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.secondary.configmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ template "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.secondary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . 
| nindent 10 }} + app.kubernetes.io/component: secondary + {{- if .Values.secondary.persistence.annotations }} + annotations: + {{- toYaml .Values.secondary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.secondary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.secondary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.secondary.persistence "global" .Values.global) }} + {{- if .Values.secondary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/svc-headless.yaml b/lib/common/services/mysql/templates/secondary/svc-headless.yaml new file mode 100644 index 00000000..703d8e74 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/svc-headless.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/svc.yaml b/lib/common/services/mysql/templates/secondary/svc.yaml new file mode 100644 index 00000000..74a4c6ef --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/svc.yaml @@ -0,0 +1,43 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.secondary.service.type }} + {{- if and (eq .Values.secondary.service.type "ClusterIP") .Values.secondary.service.clusterIP }} + clusterIP: {{ .Values.secondary.service.clusterIP }} + {{- end }} + {{- if and .Values.secondary.service.loadBalancerIP (eq .Values.secondary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.secondary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") .Values.secondary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.secondary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq 
.Values.secondary.service.type "NodePort") (eq .Values.secondary.service.type "LoadBalancer")) .Values.secondary.service.nodePort) }} + nodePort: {{ .Values.secondary.service.nodePort }} + {{- else if eq .Values.secondary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secrets.yaml b/lib/common/services/mysql/templates/secrets.yaml index 888cd5e0..9412fc35 100644 --- a/lib/common/services/mysql/templates/secrets.yaml +++ b/lib/common/services/mysql/templates/secrets.yaml @@ -1,34 +1,21 @@ -{{- if (not .Values.existingSecret) -}} +{{- if eq (include "mysql.createSecret" .) "true" }} apiVersion: v1 kind: Secret metadata: - name: {{ template "mysql.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} type: Opaque data: - {{- if .Values.root.password }} - mysql-root-password: {{ .Values.root.password | b64enc | quote }} - {{- else if (not .Values.root.forcePassword) }} - mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }} - {{ else }} - mysql-root-password: {{ required "A MySQL Root Password is required!" 
.Values.root.password }} - {{- end }} - {{- if .Values.db.user }} - {{- if .Values.db.password }} - mysql-password: {{ .Values.db.password | b64enc | quote }} - {{- else if (not .Values.db.forcePassword) }} - mysql-password: {{ randAlphaNum 10 | b64enc | quote }} - {{- else }} - mysql-password: {{ required "A MySQL Database Password is required!" .Values.db.password }} - {{- end }} - {{- end }} - {{- if .Values.replication.enabled }} - {{- if .Values.replication.password }} - mysql-replication-password: {{ .Values.replication.password | b64enc | quote }} - {{- else if (not .Values.replication.forcePassword) }} - mysql-replication-password: {{ randAlphaNum 10 | b64enc | quote }} - {{- else }} - mysql-replication-password: {{ required "A MySQL Replication Password is required!" .Values.replication.password }} - {{- end }} + mysql-root-password: {{ include "mysql.root.password" . | b64enc | quote }} + mysql-password: {{ include "mysql.password" . | b64enc | quote }} + {{- if eq .Values.architecture "replication" }} + mysql-replication-password: {{ include "mysql.replication.password" . | b64enc | quote }} {{- end }} {{- end }} diff --git a/lib/common/services/mysql/templates/serviceaccount.yaml b/lib/common/services/mysql/templates/serviceaccount.yaml index ebde86c7..59eb1040 100644 --- a/lib/common/services/mysql/templates/serviceaccount.yaml +++ b/lib/common/services/mysql/templates/serviceaccount.yaml @@ -2,12 +2,21 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "mysql.serviceAccountName" . }} - labels: - app: {{ template "mysql.name" . }} - chart: {{ template "mysql.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" + name: {{ include "mysql.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- if (not .Values.auth.customPasswordFiles) }} secrets: - name: {{ template "mysql.secretName" . }} - {{- end }} +{{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/servicemonitor.yaml b/lib/common/services/mysql/templates/servicemonitor.yaml index 6dad49f0..f082dd54 100644 --- a/lib/common/services/mysql/templates/servicemonitor.yaml +++ b/lib/common/services/mysql/templates/servicemonitor.yaml @@ -2,18 +2,23 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "mysql.fullname" . }} + name: {{ include "common.names.fullname" . }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} {{- end }} - labels: {{- include "mysql.labels" . | nindent 4 }} - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: master endpoints: - port: metrics {{- if .Values.metrics.serviceMonitor.interval }} @@ -22,7 +27,16 @@ spec: {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} namespaceSelector: matchNames: - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics {{- end }} diff --git a/lib/common/services/mysql/templates/slave-configmap.yaml b/lib/common/services/mysql/templates/slave-configmap.yaml deleted file mode 100644 index fbaeb78c..00000000 --- a/lib/common/services/mysql/templates/slave-configmap.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if and .Values.replication.enabled .Values.slave.config }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . 
| nindent 4 }} - component: slave -data: - my.cnf: |- -{{ .Values.slave.config | indent 4 }} -{{- end }} diff --git a/lib/common/services/mysql/templates/slave-statefulset.yaml b/lib/common/services/mysql/templates/slave-statefulset.yaml deleted file mode 100644 index f8a78dd3..00000000 --- a/lib/common/services/mysql/templates/slave-statefulset.yaml +++ /dev/null @@ -1,262 +0,0 @@ -{{- if .Values.replication.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: slave -spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: slave - serviceName: "{{ template "mysql.slave.fullname" . }}" - replicas: {{ .Values.slave.replicas }} - updateStrategy: - type: {{ .Values.slave.updateStrategy.type }} - {{- if (eq "Recreate" .Values.slave.updateStrategy.type) }} - rollingUpdate: null - {{- end }} - template: - metadata: - labels: {{- include "mysql.labels" . | nindent 8 }} - component: slave - {{- if .Values.slave.podAnnotations }} - annotations: {{ include "mysql.tplValue" ( dict "value" .Values.slave.podAnnotations "context" $) | nindent 8 }} - {{- end }} - spec: -{{- include "mysql.imagePullSecrets" . 
| indent 6 }} - {{- if .Values.slave.affinity }} - affinity: {{- include "mysql.tplValue" (dict "value" .Values.slave.affinity "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.nodeSelector }} - nodeSelector: {{- include "mysql.tplValue" (dict "value" .Values.slave.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.tolerations }} - tolerations: {{- include "mysql.tplValue" (dict "value" .Values.slave.tolerations "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.slave.securityContext.fsGroup }} - runAsUser: {{ .Values.slave.securityContext.runAsUser }} - {{- end }} - serviceAccountName: {{ template "mysql.serviceAccountName" . }} - {{- if and .Values.volumePermissions.enabled .Values.slave.persistence.enabled }} - initContainers: - - name: volume-permissions - image: {{ template "mysql.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - {{- if .Values.slave.containerSecurityContext }} - securityContext: {{- toYaml .Values.slave.containerSecurityContext | nindent 12 }} - {{- end }} - command: - - /bin/bash - - -ec - - | - chown -R {{ .Values.slave.securityContext.runAsUser }}:{{ .Values.slave.securityContext.fsGroup }} {{ .Values.slave.persistence.mountPath }} - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.slave.persistence.mountPath }} - {{- end }} - containers: - - name: mysql - image: {{ template "mysql.image" . }} - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - env: - - name: MYSQL_REPLICATION_MODE - value: "slave" - - name: MYSQL_MASTER_HOST - value: {{ template "mysql.fullname" . 
}} - - name: MYSQL_MASTER_PORT_NUMBER - value: "3306" - - name: MYSQL_MASTER_ROOT_USER - value: "root" - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_MASTER_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_MASTER_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-root-password - {{- end }} - - name: MYSQL_REPLICATION_USER - value: "{{ .Values.replication.user }}" - {{- if .Values.replication.injectSecretsAsVolume }} - - name: MYSQL_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-replication-password" - {{- else }} - - name: MYSQL_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-replication-password - {{- end }} - {{- if .Values.slave.extraEnvVars }} - {{- include "mysql.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} - envFrom: - {{- if .Values.slave.extraEnvVarsCM }} - - configMapRef: - name: {{ .Values.slave.extraEnvVarsCM }} - {{- end }} - {{- if .Values.slave.extraEnvVarsSecret }} - - secretRef: - name: {{ .Values.slave.extraEnvVarsSecret }} - {{- end }} - {{- end }} - ports: - - name: mysql - containerPort: 3306 - {{- if .Values.slave.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} - failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.slave.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.resources }} - resources: {{- toYaml .Values.slave.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.slave.persistence.mountPath }} - {{- if .Values.slave.config }} - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - mountPath: /opt/bitnami/mysql/secrets/ - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "mysql.metrics.image" . }} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - env: - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_MASTER_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_MASTER_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-root-password - {{- end }} - command: - - /bin/sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter - ports: - - name: metrics - containerPort: 9104 - livenessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 15 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 5 - timeoutSeconds: 1 - {{- if .Values.metrics.resources }} - resources: {{- toYaml .Values.metrics.resources | nindent 12 }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume }} - volumeMounts: - - name: mysql-credentials - mountPath: /opt/bitnami/mysqld-exporter/secrets/ - {{- end }} - {{- end }} - volumes: - {{- if .Values.slave.config }} - - name: config - configMap: - name: {{ template "mysql.slave.fullname" . }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - secret: - secretName: {{ template "mysql.fullname" . }} - items: - {{- if .Values.root.injectSecretsAsVolume }} - - key: mysql-root-password - path: mysql-root-password - {{- end }} - {{- if .Values.replication.injectSecretsAsVolume }} - - key: mysql-replication-password - path: mysql-replication-password - {{- end }} - {{- end }} -{{- if not .Values.slave.persistence.enabled }} - - name: "data" - emptyDir: {} -{{- else if and .Values.slave.persistence.enabled .Values.slave.persistence.existingClaim }} - - name: "data" - persistentVolumeClaim: - claimName: {{ .Values.slave.persistence.existingClaim }} -{{- else if and .Values.slave.persistence.enabled (not .Values.slave.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - labels: - app: {{ template "mysql.name" . 
}} - component: slave - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.slave.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.slave.persistence.size | quote }} - {{ include "mysql.slave.storageClass" . }} -{{- end }} -{{- end }} diff --git a/lib/common/services/mysql/templates/slave-svc.yaml b/lib/common/services/mysql/templates/slave-svc.yaml deleted file mode 100644 index 450b00b7..00000000 --- a/lib/common/services/mysql/templates/slave-svc.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.replication.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: slave - {{- if or .Values.service.annotations .Values.metrics.service.annotations }} - annotations: - {{- if .Values.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.metrics.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} - {{- if not (empty .Values.service.loadBalancerIP.slave) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP.slave }} - {{- end }} - {{- end }} - ports: - - name: mysql - port: {{ .Values.service.port }} - targetPort: mysql - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} - nodePort: {{ .Values.service.nodePort.slave }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - port: {{ .Values.metrics.service.port }} - targetPort: metrics - 
{{- end }} - selector: {{- include "mysql.matchLabels" . | nindent 4 }} - component: slave -{{- end }} diff --git a/lib/common/services/mysql/values-production.yaml b/lib/common/services/mysql/values-production.yaml deleted file mode 100644 index 303d981b..00000000 --- a/lib/common/services/mysql/values-production.yaml +++ /dev/null @@ -1,304 +0,0 @@ -## Admin (root) credentials -## -root: - ## MySQL admin password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount admin password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## Custom user/db credentials -## -db: - ## MySQL username and password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run - ## Note that this user should be different from the MySQL replication user (replication.user) - ## - user: - password: - ## Database to create - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run - ## - name: my_database - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## Replication configuration -## -replication: - ## Enable replication. This enables the creation of replicas of MySQL. 
If false, only a - ## master deployment would be created - ## - enabled: true - ## - ## MySQL replication user - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## Note that this user should be different from the MySQL user (db.user) - ## - user: replicator - ## MySQL replication user password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." 
-# -## ConfigMap with scripts to be run at first boot -## Note: This will override initdbScripts -# initdbScriptsConfigMap: - -## Slave nodes parameters -## -slave: - ## Number of slave replicas - ## - replicas: 2 - - ## Configure MySQL slave with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for slave nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Node labels for pod assignment. Evaluated as a template. 
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL slave pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL slave containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL slave containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL slave containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on slave nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## MySQL prometheus metrics parameters -## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint -## -metrics: - enabled: true - ## Bitnami MySQL Prometheus exporter image - ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ - ## - image: - registry: docker.io - repository: bitnami/mysqld-exporter - tag: 0.12.1-debian-10-r127 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. 
- ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## MySQL Prometheus exporter containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 0.5 - # memory: 256Mi - requests: {} - # cpu: 0.5 - # memory: 256Mi - - ## MySQL Prometheus exporter service parameters - ## - service: - type: ClusterIP - port: 9104 - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.port }}" - - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - enabled: false - ## Namespace in which Prometheus is running - ## - # namespace: monitoring - - ## Interval at which metrics should be scraped. 
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## - # selector: - # prometheus: my-prometheus diff --git a/lib/common/services/mysql/values.schema.json b/lib/common/services/mysql/values.schema.json new file mode 100644 index 00000000..8021a460 --- /dev/null +++ b/lib/common/services/mysql/values.schema.json @@ -0,0 +1,178 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MySQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "required": ["database", "username", "password"], + "properties": { + "rootPassword": { + "type": "string", + "title": "MySQL root password", + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "database": { + "type": "string", + "title": "MySQL custom database name" + }, + "username": { + "type": "string", + "title": "MySQL custom username" + }, + "password": { + "type": "string", + "title": "MySQL custom password" + }, + "replicationUser": { + "type": "string", + "title": "MySQL replication username" + }, + "replicationPassword": { + "type": "string", + "title": "MySQL replication password" + } + } + }, + "primary": { + "type": "object", + "title": "Primary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL primary Pod security context", + "properties": { + "enabled": { 
+ "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL primary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "primary/persistence/enabled" + } + } + } + } + } + }, + "secondary": { + "type": "object", + "title": "Secondary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL secondary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL secondary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + 
"default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "secondary/persistence/enabled" + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/lib/common/services/mysql/values.yaml b/lib/common/services/mysql/values.yaml index 7cc01845..3ff7a0e8 100644 --- a/lib/common/services/mysql/values.yaml +++ b/lib/common/services/mysql/values.yaml @@ -1,553 +1,900 @@ +## @section Global parameters ## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets [array] Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) ## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Cluster domain +## +clusterDomain: cluster.local +## @param commonAnnotations [object] Common annotations to add to all MySQL resources (sub-charts are not considered). 
Evaluated as a template +## +commonAnnotations: {} +## @param commonLabels [object] Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param extraDeploy [array] Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MySQL common parameters ## Bitnami MySQL image ## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## @param image.registry MySQL image registry +## @param image.repository MySQL image repository +## @param image.tag MySQL image tag (immutable tags are recommended) +## @param image.pullPolicy MySQL image pull policy +## @param image.pullSecrets [array] Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled ## image: registry: docker.io repository: bitnami/mysql - tag: 8.0.20-debian-10-r37 + tag: 8.0.29-debian-10-r2 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
+ ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName + pullSecrets: [] ## Set to true if you would like to see extra information on logs - ## It turns BASH debugging in minideb-extras-base + ## It turns BASH and/or NAMI debugging in the image ## debug: false - -## String to partially override mysql.fullname template (will maintain the release name) +## @param architecture MySQL architecture (`standalone` or `replication`) ## -# nameOverride: - -## String to fully override mysql.fullname template +architecture: standalone +## MySQL Authentication parameters ## -# fullnameOverride: - -## Cluster domain +auth: + ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## @param auth.database Name for a custom database to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-on-first-run + ## + database: my_database + ## @param auth.username Name for a custom user to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-user-on-first-run + ## + username: "" + ## @param auth.password Password for the new user. Ignored if existing secret is provided + ## + password: "" + ## @param auth.replicationUser MySQL replication user + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + replicationUser: replicator + ## @param auth.replicationPassword MySQL replication user password. 
Ignored if existing secret is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` + ## NOTE: When it's set the auth.rootPassword, auth.password, auth.replicationPassword are ignored. + ## + existingSecret: "" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## @param auth.customPasswordFiles [object] Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mysql-root + ## user: /vault/secrets/mysql-user + ## replicator: /vault/secrets/mysql-replicator + ## + customPasswordFiles: {} +## @param initdbScripts [object] Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." 
## -clusterDomain: cluster.local +initdbScripts: {} +## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsConfigMap: "" + +## @section MySQL Primary parameters + +primary: + ## @param primary.command [array] Override default container command on MySQL Primary container(s) (useful when using custom images) + ## + command: [] + ## @param primary.args [array] Override default container args on MySQL Primary container(s) (useful when using custom images) + ## + args: [] + ## @param primary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.configuration [string] Configure MySQL Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param primary.updateStrategy Update strategy type for the MySQL primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param primary.rollingUpdatePartition Partition update strategy for MySQL Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param primary.podAnnotations [object] Additional pod annotations for MySQL primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param primary.podAffinityPreset MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## MySQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key MySQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values [array] MySQL primary node label values to match. 
Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity [object] Affinity for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector [object] Node labels for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations [array] Tolerations for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param primary.podSecurityContext.enabled Enable security context for MySQL primary pods + ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param primary.containerSecurityContext.enabled MySQL primary container securityContext + ## @param primary.containerSecurityContext.runAsUser User ID for the MySQL primary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL primary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param primary.resources.limits [object] The resources limits for MySQL primary containers + ## @param primary.resources.requests [object] The requested resources for MySQL primary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.readinessProbe.enabled Enable readinessProbe + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for 
readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.startupProbe.enabled Enable startupProbe + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 10 + successThreshold: 1 + ## @param primary.customLivenessProbe [object] Override default liveness probe for MySQL primary containers + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe [object] Override default readiness probe for MySQL primary containers + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe [object] Override default startup probe for MySQL primary containers + ## + customStartupProbe: {} + ## @param primary.extraFlags MySQL primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param primary.extraEnvVars [array] Extra environment variables to be set on MySQL primary containers + ## E.g. 
+ ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL primary containers + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL primary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + existingClaim: "" + ## @param primary.persistence.storageClass MySQL primary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.annotations [object] MySQL primary persistent volume claim annotations + ## + annotations: {} + ## @param primary.persistence.accessModes MySQL primary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size MySQL primary persistent volume size + ## + size: 8Gi + ## @param primary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.initContainers [array] Add additional init containers for the MySQL Primary pod(s) + ## + initContainers: [] + ## @param primary.sidecars [array] Add additional sidecar containers for the MySQL Primary pod(s) + ## + sidecars: [] + ## MySQL Primary Service parameters + ## + service: + ## @param primary.service.type MySQL Primary K8s service type + ## + type: ClusterIP + ## @param primary.service.port MySQL Primary K8s service port + ## + port: 3306 + ## @param primary.service.nodePort MySQL Primary K8s service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param primary.service.clusterIP MySQL Primary K8s service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.loadBalancerIP MySQL Primary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param 
primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL Primary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param primary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL primary pods + ## + enabled: false + ## @param primary.pdb.minAvailable Minimum number/percentage of MySQL primary pods that should remain scheduled + ## + minAvailable: 1 + ## @param primary.pdb.maxUnavailable Maximum number/percentage of MySQL primary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param primary.podLabels [object] MySQL Primary pod label. 
If labels are same as commonLabels , this will take precedence + ## + podLabels: {} + +## @section MySQL Secondary parameters + +secondary: + ## @param secondary.replicaCount Number of MySQL secondary replicas + ## + replicaCount: 1 + ## @param secondary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param secondary.command [array] Override default container command on MySQL Secondary container(s) (useful when using custom images) + ## + command: [] + ## @param secondary.args [array] Override default container args on MySQL Secondary container(s) (useful when using custom images) + ## + args: [] + ## @param secondary.configuration [string] Configure MySQL Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param secondary.existingConfigmap Name of existing ConfigMap with MySQL Secondary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param secondary.updateStrategy Update strategy type for the MySQL secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param secondary.rollingUpdatePartition Partition update strategy for MySQL Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param secondary.podAnnotations [object] Additional pod annotations for MySQL secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param secondary.podAffinityPreset MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param secondary.podAntiAffinityPreset MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + ## MySQL Secondary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param secondary.nodeAffinityPreset.type MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param secondary.nodeAffinityPreset.key MySQL secondary node label key to match Ignored if `secondary.affinity` is set. + ## E.g. 
+ ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param secondary.nodeAffinityPreset.values [array] MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param secondary.affinity [object] Affinity for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param secondary.nodeSelector [object] Node labels for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param secondary.tolerations [array] Tolerations for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param secondary.podSecurityContext.enabled Enable security context for MySQL secondary pods + ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param secondary.containerSecurityContext.enabled MySQL secondary container securityContext + ## @param secondary.containerSecurityContext.runAsUser User ID for the MySQL secondary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL secondary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to 
leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param secondary.resources.limits [object] The resources limits for MySQL secondary containers + ## @param secondary.resources.requests [object] The requested resources for MySQL secondary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.livenessProbe.enabled Enable livenessProbe + ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.readinessProbe.enabled Enable readinessProbe + ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for 
readinessProbe + ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.startupProbe.enabled Enable startupProbe + ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param secondary.customLivenessProbe [object] Override default liveness probe for MySQL secondary containers + ## + customLivenessProbe: {} + ## @param secondary.customReadinessProbe [object] Override default readiness probe for MySQL secondary containers + ## + customReadinessProbe: {} + ## @param secondary.customStartupProbe [object] Override default startup probe for MySQL secondary containers + ## + customStartupProbe: {} + ## @param secondary.extraFlags MySQL secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param secondary.extraEnvVars [array] An array to add extra environment variables on MySQL secondary containers + ## E.g. 
+ ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL secondary containers + ## + extraEnvVarsCM: "" + ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL secondary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param secondary.persistence.enabled Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` + ## + enabled: true + ## @param secondary.persistence.storageClass MySQL secondary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param secondary.persistence.annotations [object] MySQL secondary persistent volume claim annotations + ## + annotations: {} + ## @param secondary.persistence.accessModes MySQL secondary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param secondary.persistence.size MySQL secondary persistent volume size + ## + size: 8Gi + ## @param secondary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param secondary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL secondary pod(s) + ## + extraVolumes: [] + ## @param secondary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) + ## + extraVolumeMounts: [] + ## @param secondary.initContainers [array] Add additional init containers for 
the MySQL secondary pod(s) + ## + initContainers: [] + ## @param secondary.sidecars [array] Add additional sidecar containers for the MySQL secondary pod(s) + ## + sidecars: [] + ## MySQL Secondary Service parameters + ## + service: + ## @param secondary.service.type MySQL secondary Kubernetes service type + ## + type: ClusterIP + ## @param secondary.service.port MySQL secondary Kubernetes service port + ## + port: 3306 + ## @param secondary.service.nodePort MySQL secondary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param secondary.service.clusterIP MySQL secondary Kubernetes service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param secondary.service.loadBalancerIP MySQL secondary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param secondary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL secondary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param secondary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param secondary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL secondary pods + ## + enabled: false + ## @param secondary.pdb.minAvailable Minimum number/percentage of MySQL secondary pods that should remain scheduled + ## + minAvailable: 1 + ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MySQL secondary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param secondary.podLabels [object] Additional pod labels for MySQL secondary pods + ## + podLabels: {} + +## @section RBAC parameters + +## MySQL pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MySQL pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the mysql.fullname template + ## + name: "" + ## @param serviceAccount.annotations [object] Annotations for MySQL Service Account + ## + annotations: {} +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## + create: false + +## @section Network Policy + +## MySQL Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal The Policy model to apply. 
+ ## When set to false, only pods with the correct + ## client label will have network access to the port MySQL is listening + ## on. When true, MySQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector [object] A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## @section Volume Permissions parameters ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. ## volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Init container' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - -## Use existing secret (ignores root, db and replication passwords) -## -# existingSecret: - -## Admin (root) credentials -## -root: - ## MySQL admin password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: false - ## Mount admin password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## Custom user/db credentials -## -db: - ## MySQL username and password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run - ## Note that this user should be different from the MySQL replication user (replication.user) - ## - user: - password: - ## Database to create - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run - ## - name: my_database - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. 
- ## - forcePassword: false - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## Replication configuration -## -replication: - ## Enable replication. This enables the creation of replicas of MySQL. If false, only a - ## master deployment would be created - ## - enabled: true - ## - ## MySQL replication user - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## Note that this user should be different from the MySQL user (db.user) - ## - user: replicator - ## MySQL replication user password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: false - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." 
-# -## ConfigMap with scripts to be run at first boot -## Note: This will override initdbScripts -# initdbScriptsConfigMap: - -serviceAccount: - create: true - ## Specify the name of the service account created/used - # name: - -## Master nodes parameters -## -master: - ## Configure MySQL with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - plugin_dir=/opt/bitnami/mysql/plugin - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for master nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Node labels for pod assignment. 
Evaluated as a template. - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL master pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL master containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL master containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL master containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on master nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## Slave nodes parameters -## -slave: - ## Number of slave replicas - ## - replicas: 1 - - ## Configure MySQL slave with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for slave nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Node 
labels for pod assignment. Evaluated as a template. - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL slave pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL slave containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL slave containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL slave containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on slave nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## MySQL Service properties -## -service: - ## MySQL Service type - ## - type: ClusterIP - - #name: {{ service_name }} - ## MySQL Service port - ## - port: 3306 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: - master: - slave: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - ## loadBalancerIP for the MySQL Service (optional, cloud specific) - ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer - ## - # loadBalancerIP: - # master: - # slave: -## MySQL prometheus metrics parameters -## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint -## -metrics: - enabled: false - ## Bitnami MySQL Prometheus exporter image - ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets [array] Specify docker-registry secret names as an array ## image: registry: docker.io - repository: bitnami/mysqld-exporter - tag: 0.12.1-debian-10-r127 + repository: bitnami/bitnami-shell + tag: 10-debian-10-r409 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - ## MySQL Prometheus exporter containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + pullSecrets: [] + ## @param volumePermissions.resources [object] Init container volume-permissions resources ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. 
This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 0.5 - # memory: 256Mi - requests: {} - # cpu: 0.5 - # memory: 256Mi + resources: {} +## @section Metrics parameters + +## Mysqld Prometheus exporter parameters +## +metrics: + ## @param metrics.enabled Start a side-car prometheus exporter + ## + enabled: false + ## @param metrics.image.registry Exporter image registry + ## @param metrics.image.repository Exporter image repository + ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Exporter image pull policy + ## @param metrics.image.pullSecrets [array] Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.14.0-debian-10-r53 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] ## MySQL Prometheus exporter service parameters + ## Mysqld Prometheus exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.service.type Kubernetes service type for MySQL Prometheus Exporter + ## @param metrics.service.port MySQL Prometheus Exporter service port + ## @param metrics.service.annotations [object] Prometheus exporter service annotations ## service: type: ClusterIP @@ -555,27 +902,125 @@ metrics: annotations: prometheus.io/scrape: "true" prometheus.io/port: "{{ .Values.metrics.service.port }}" - - ## Prometheus Operator ServiceMonitor configuration + ## @param metrics.extraArgs.primary [array] Extra args to be passed to mysqld_exporter on Primary pods + ## @param metrics.extraArgs.secondary [array] Extra args to be passed to mysqld_exporter on Secondary pods + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. 
+ ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + ## Mysqld Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits [object] The resources limits for MySQL prometheus exporter containers + ## @param metrics.resources.requests [object] The requested resources for MySQL prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Mysqld Prometheus exporter liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Mysqld Prometheus exporter readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Prometheus Service 
Monitor + ## ref: https://github.com/coreos/prometheus-operator ## serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## enabled: false - ## Namespace in which Prometheus is running + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created ## - # namespace: monitoring - - ## Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings [array] Specify Metric Relabellings to add to the scrape endpoint ## - # selector: - # prometheus: my-prometheus + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels [object] Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 37e35087..b57fc56a 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -55,9 +55,8 @@ charts: # Chart is 
frozen due to custom modification see https://github.com/Qovery/engine/pull/293 - name: mysql repo_name: bitnami - version: 6.14.2 + version: 8.9.6 dest: services - no_sync: true - name: postgresql repo_name: bitnami version: 8.9.8 diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index f0de9792..952e64b9 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -619,6 +619,48 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { } } +pub fn database_test_environment_on_upgrade(context: &Context) -> EnvironmentRequest { + let suffix = "c3dn5so3dltod3s"; + let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); + + EnvironmentRequest { + execution_id: context.execution_id().to_string(), + id: "c4dn5so3dltod3s".to_string(), + owner_id: "c5dn5so3dltod3s".to_string(), + project_id: "c6dn5so3dltod3s".to_string(), + organization_id: context.organization_id().to_string(), + action: Action::Create, + applications: vec![Application { + long_id: Uuid::from_str("9d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(), + name: application_name, + git_url: "https://github.com/Qovery/engine-testing.git".to_string(), + commit_id: "fc575a2f3be0b9100492c8a463bf18134a8698a5".to_string(), + dockerfile_path: Some("Dockerfile".to_string()), + buildpack_language: None, + root_path: String::from("/"), + action: Action::Create, + git_credentials: Some(GitCredentials { + login: "x-access-token".to_string(), + access_token: "xxx".to_string(), + expired_at: Utc::now(), + }), + storage: vec![], + environment_vars: BTreeMap::default(), + branch: "basic-app-deploy".to_string(), + ports: vec![], + total_cpus: "100m".to_string(), + total_ram_in_mib: 256, + min_instances: 1, + max_instances: 1, + cpu_burst: "100m".to_string(), + advance_settings: Default::default(), + }], + routers: vec![], + databases: vec![], + clone_from_environment_id: None, + } +} + pub fn environment_only_http_server_router_with_sticky_session( 
context: &Context, test_domain: &str, @@ -1672,3 +1714,226 @@ where Err(e) => Err(e), } } + +pub fn test_db_on_upgrade( + context: Context, + logger: Box, + mut environment: EnvironmentRequest, + secrets: FuncTestsSecrets, + version: &str, + test_name: &str, + db_kind: DatabaseKind, + provider_kind: Kind, + database_mode: DatabaseMode, + is_public: bool, +) -> String { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + let context_for_delete = context.clone_not_same_execution_id(); + + let app_id = Uuid::from_str("8d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(); + let database_username = "superuser".to_string(); + let database_password = "uxoyf358jojkemj".to_string(); + let db_kind_str = db_kind.name().to_string(); + let db_id = "c2dn5so3dltod3s".to_string(); + let database_host = format!("{}-{}", db_id, db_kind_str.clone()); + let database_fqdn = format!( + "{}.{}.{}", + database_host, + context.cluster_id(), + secrets + .clone() + .DEFAULT_TEST_DOMAIN + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + ); + + let db_infos = db_infos( + db_kind.clone(), + db_id.clone(), + database_mode.clone(), + database_username.clone(), + database_password.clone(), + if is_public { + database_fqdn.clone() + } else { + database_host.clone() + }, + ); + let database_port = db_infos.db_port.clone(); + let storage_size = 10; + let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone()); + let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone()); + let db = Database { + kind: db_kind.clone(), + action: Action::Create, + long_id: Uuid::from_str("7d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(), + name: db_id.clone(), + version: version.to_string(), + fqdn_id: database_host.clone(), + fqdn: database_fqdn.clone(), + port: database_port.clone(), + username: database_username.clone(), + password: database_password.clone(), + total_cpus: "50m".to_string(), + 
total_ram_in_mib: 256, + disk_size_in_gib: storage_size.clone(), + database_instance_type: db_instance_type.to_string(), + database_disk_type: db_disk_type.to_string(), + encrypt_disk: true, + activate_high_availability: false, + activate_backups: false, + publicly_accessible: is_public.clone(), + mode: database_mode.clone(), + }; + + environment.databases = vec![db.clone()]; + + let app_name = format!("{}-app-{}", db_kind_str.clone(), generate_id()); + environment.applications = environment + .applications + .into_iter() + .map(|mut app| { + app.long_id = app_id.clone(); + app.name = to_short_id(&app_id); + app.branch = app_name.clone(); + app.commit_id = db_infos.app_commit.clone(); + app.ports = vec![Port { + id: "zdf7d6aad".to_string(), + long_id: Default::default(), + port: 1234, + public_port: Some(1234), + name: None, + publicly_accessible: true, + protocol: Protocol::HTTP, + }]; + app.dockerfile_path = Some(format!("Dockerfile-{}", version)); + app.environment_vars = db_infos.app_env_vars.clone(); + app + }) + .collect::>(); + + let mut environment_delete = environment.clone(); + environment_delete.action = Action::Delete; + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); + + let (localisation, kubernetes_version) = match provider_kind { + Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), + Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), + Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + }; + + let engine_config = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + 
localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + match database_mode.clone() { + DatabaseMode::CONTAINER => { + match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(pvc) => assert_eq!( + pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, + format!("{}Gi", storage_size) + ), + Err(_) => assert!(false), + }; + + match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(svc) => assert_eq!( + svc.items + .expect("No items in svc") + .into_iter() + .filter(|svc| svc.metadata.name == database_host && &svc.spec.svc_type == "LoadBalancer") + .collect::>() + .len(), + match is_public { + true => 1, + false => 0, + } + ), + Err(_) => assert!(false), + }; + } + DatabaseMode::MANAGED => { + match get_svc(context, provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(svc) => { + let service = svc + .items + .expect("No items in svc") + .into_iter() + .filter(|svc| svc.metadata.name == database_host && svc.spec.svc_type == "ExternalName") + .collect::>(); + let annotations = &service[0].metadata.annotations; + assert_eq!(service.len(), 1); + match is_public { + true => { + assert!(annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")); + assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_fqdn); + } + false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")), + } + } + Err(_) => assert!(false), + }; + } + } + + let engine_config_for_delete = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context_for_delete, + logger.clone(), + 
localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + }; + + // let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); + assert!(matches!(ret, TransactionResult::Ok)); + + return test_name.to_string(); +} diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 30346270..c32f608f 100644 --- a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -14,7 +14,7 @@ use self::test_utilities::utilities::{ use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; use qovery_engine::utilities::to_short_id; -use test_utilities::common::{test_db, Infrastructure}; +use test_utilities::common::{test_db, test_db_on_upgrade, Infrastructure}; /** ** @@ -664,6 +664,39 @@ fn test_mysql_configuration(version: &str, test_name: &str, database_mode: Datab }) } +#[allow(dead_code)] +fn test_mysql_configuration_on_upgrade(version: &str, test_name: &str, database_mode: DatabaseMode, is_public: bool) { + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .AWS_TEST_ORGANIZATION_ID + .as_ref() + .expect("AWS_TEST_ORGANIZATION_ID is not set") + .as_str(), + secrets + .AWS_TEST_CLUSTER_ID + .as_ref() + .expect("AWS_TEST_CLUSTER_ID is not set") + .as_str(), + ); + let environment = test_utilities::common::database_test_environment_on_upgrade(&context); + + engine_run_test(|| { + test_db_on_upgrade( + context, + logger(), + environment, + secrets, + version, + test_name, + DatabaseKind::Mysql, + Kind::Aws, + database_mode, + is_public, + ) + }) +} + // MySQL self-hosted environment #[cfg(feature = "test-aws-self-hosted")] #[named] @@ -687,6 +720,14 @@ fn private_mysql_v8_deploy_a_working_dev_environment() { test_mysql_configuration("8.0", 
function_name!(), CONTAINER, false); } +#[cfg(feature = "test-aws-self-hosted")] +#[named] +#[test] +#[ignore] +fn private_mysql_v8_deploy_a_working_dev_environment_on_upgrade() { + test_mysql_configuration_on_upgrade("8.0", function_name!(), CONTAINER, false); +} + #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] From 5d49ce71a6cea20de2094a05dcdd27cc7cd66eba Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 4 May 2022 17:17:32 +0200 Subject: [PATCH 107/122] chore: upgrade aws vpc cni (#615) --- .../bootstrap/charts/aws-vpc-cni/Chart.yaml | 4 +- .../bootstrap/charts/aws-vpc-cni/README.md | 2 + .../aws-vpc-cni/templates/clusterrole.yaml | 5 +- .../templates/customresourcedefinition.yaml | 6 +- .../aws-vpc-cni/templates/daemonset.yaml | 19 ++ .../bootstrap/charts/aws-vpc-cni/test.yaml | 170 ++++++++++++++++++ .../bootstrap/charts/aws-vpc-cni/values.yaml | 45 +++-- lib/helm-freeze.yaml | 2 +- 8 files changed, 230 insertions(+), 23 deletions(-) create mode 100644 lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml index 2f572eb2..13d0cfd6 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.7.5 +appVersion: v1.10.2 description: A Helm chart for the AWS VPC CNI home: https://github.com/aws/amazon-vpc-cni-k8s icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png @@ -15,4 +15,4 @@ maintainers: name: aws-vpc-cni sources: - https://github.com/aws/amazon-vpc-cni-k8s -version: 1.1.3 +version: 1.1.13 diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md index 768f629d..24152152 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md @@ -54,6 +54,7 @@ The following table lists the configurable parameters for this chart 
and their d | `nodeSelector` | Node labels for pod assignment | `{}` | | `podSecurityContext` | Pod Security Context | `{}` | | `podAnnotations` | annotations to add to each pod | `{}` | +| `podLabels` | Labels to add to each pod | `{}` | | `priorityClassName` | Name of the priorityClass | `system-node-critical` | | `resources` | Resources for the pods | `requests.cpu: 10m` | | `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` | @@ -65,6 +66,7 @@ The following table lists the configurable parameters for this chart and their d | `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` | | `tolerations` | Optional deployment tolerations | `[]` | | `updateStrategy` | Optional update strategy | `type: RollingUpdate` | +| `cri.hostPath` | Optional use alternative container runtime | `nil` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml index 0635b5ed..f9db311a 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml @@ -12,9 +12,12 @@ rules: verbs: ["list", "watch", "get"] - apiGroups: [""] resources: - - pods - namespaces verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - pods + verbs: ["list", "watch", "get", "patch"] - apiGroups: [""] resources: - nodes diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml index bdd29e7a..2385fb3a 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml @@ -1,5 +1,5 @@ {{- if .Values.crd.create -}} 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: eniconfigs.crd.k8s.amazonaws.com @@ -12,6 +12,10 @@ spec: - name: v1alpha1 served: true storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true names: plural: eniconfigs singular: eniconfig diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml index 10388ef0..8c31dd77 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml @@ -14,6 +14,9 @@ spec: {{- else }} app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} + {{- end }} {{- end }} template: metadata: @@ -27,6 +30,9 @@ spec: app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} k8s-app: aws-node + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} spec: priorityClassName: "{{ .Values.priorityClassName }}" serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . 
}} @@ -63,8 +69,10 @@ spec: name: metrics livenessProbe: {{ toYaml .Values.livenessProbe | indent 12 }} + timeoutSeconds: {{ .Values.livenessProbeTimeoutSeconds }} readinessProbe: {{ toYaml .Values.readinessProbe | indent 12 }} + timeoutSeconds: {{ .Values.readinessProbeTimeoutSeconds }} env: {{- range $key, $value := .Values.env }} - name: {{ $key }} @@ -92,8 +100,13 @@ spec: {{- end }} - mountPath: /host/var/log/aws-routed-eni name: log-dir +{{- if .Values.cri.hostPath }} + - mountPath: /var/run/cri.sock + name: cri +{{- else }} - mountPath: /var/run/dockershim.sock name: dockershim +{{- end }} - mountPath: /var/run/aws-node name: run-dir - mountPath: /run/xtables.lock @@ -110,9 +123,15 @@ spec: configMap: name: {{ include "aws-vpc-cni.fullname" . }} {{- end }} +{{- with .Values.cri.hostPath }} + - name: cri + hostPath: + {{- toYaml . | nindent 10 }} +{{- else }} - name: dockershim hostPath: path: /var/run/dockershim.sock +{{- end }} - name: log-dir hostPath: path: /var/log/aws-routed-eni diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml new file mode 100644 index 00000000..6c582a13 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml @@ -0,0 +1,170 @@ +# Test values for aws-vpc-cni. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# This default name override is to maintain backwards compatability with +# existing naming +nameOverride: aws-node + +init: + image: + tag: v1.10.2 + region: us-west-2 + account: "602401143452" + pullPolicy: Always + domain: "amazonaws.com" + # Set to use custom image + # override: "repo/org/image:tag" + env: + DISABLE_TCP_EARLY_DEMUX: "false" + ENABLE_IPv6: "false" + securityContext: + privileged: true + +image: + region: us-west-2 + tag: v1.10.2 + account: "602401143452" + domain: "amazonaws.com" + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + +# The CNI supports a number of environment variable settings +# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables +env: + ADDITIONAL_ENI_TAGS: "{}" + AWS_VPC_CNI_NODE_PORT_SUPPORT: "true" + AWS_VPC_ENI_MTU: "9001" + AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false" + AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false" + AWS_VPC_K8S_CNI_EXTERNALSNAT: "false" + AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log" + AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG + AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng" + AWS_VPC_K8S_CNI_VETHPREFIX: eni + AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log" + AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG + DISABLE_INTROSPECTION: "false" + DISABLE_METRICS: "false" + ENABLE_POD_ENI: "false" + ENABLE_PREFIX_DELEGATION: "false" + WARM_ENI_TARGET: "1" + WARM_PREFIX_TARGET: "1" + DISABLE_NETWORK_RESOURCE_PROVISIONING: "false" + ENABLE_IPv4: "true" + ENABLE_IPv6: "false" + +# this flag enables you to use the match label that was present in the original daemonset deployed by EKS +# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release +originalMatchLabels: false + +cniConfig: + enabled: false + fileContents: "" + +imagePullSecrets: [] + +fullnameOverride: "aws-node" + +priorityClassName: system-node-critical + +podSecurityContext: {} + +podAnnotations: {} + +podLabels: {} + +securityContext: + 
capabilities: + add: + - "NET_ADMIN" + +crd: + create: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +livenessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' + initialDelaySeconds: 60 + +livenessProbeTimeoutSeconds: 10 + +readinessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' + initialDelaySeconds: 1 + +readinessProbeTimeoutSeconds: 10 + +resources: + requests: + cpu: 10m + +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + +nodeSelector: {} + +tolerations: [] + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + +eniConfig: + # Specifies whether ENIConfigs should be created + create: false + region: us-west-2 + subnets: + # Key identifies the AZ + # Value contains the subnet ID and security group IDs within that AZ + # a: + # id: subnet-123 + # securityGroups: + # - sg-123 + # b: + # id: subnet-456 + # securityGroups: + # - sg-456 + # c: + # id: subnet-789 + # securityGroups: + # - sg-789 + +cri: + hostPath: # "/var/run/containerd/containerd.sock" \ No newline at end of file diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml index 84388b40..7917bdff 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml 
@@ -8,19 +8,24 @@ nameOverride: aws-node init: image: - tag: v1.7.5 + tag: v1.10.2 region: us-west-2 + account: "602401143452" pullPolicy: Always + domain: "amazonaws.com" # Set to use custom image # override: "repo/org/image:tag" env: DISABLE_TCP_EARLY_DEMUX: "false" + ENABLE_IPv6: "false" securityContext: privileged: true image: region: us-west-2 - tag: v1.7.5 + tag: v1.10.2 + account: "602401143452" + domain: "amazonaws.com" pullPolicy: Always # Set to use custom image # override: "repo/org/image:tag" @@ -43,13 +48,18 @@ env: DISABLE_INTROSPECTION: "false" DISABLE_METRICS: "false" ENABLE_POD_ENI: "false" + ENABLE_PREFIX_DELEGATION: "false" WARM_ENI_TARGET: "1" + WARM_PREFIX_TARGET: "1" + DISABLE_NETWORK_RESOURCE_PROVISIONING: "false" + ENABLE_IPv4: "true" + ENABLE_IPv6: "false" # this flag enables you to use the match label that was present in the original daemonset deployed by EKS # You can then annotate and label the original aws-node resources and 'adopt' them into a helm release originalMatchLabels: false -cniConfig: +cniConfig: enabled: false fileContents: "" @@ -63,6 +73,8 @@ podSecurityContext: {} podAnnotations: {} +podLabels: {} + securityContext: capabilities: add: @@ -85,15 +97,23 @@ livenessProbe: command: - /app/grpc-health-probe - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' initialDelaySeconds: 60 +livenessProbeTimeoutSeconds: 10 + readinessProbe: exec: command: - /app/grpc-health-probe - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' initialDelaySeconds: 1 +readinessProbeTimeoutSeconds: 10 + resources: requests: cpu: 10m @@ -111,20 +131,6 @@ affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - - matchExpressions: - - key: "beta.kubernetes.io/os" - operator: In - values: - - linux - - key: "beta.kubernetes.io/arch" - operator: In - values: - - amd64 - - arm64 - - key: "eks.amazonaws.com/compute-type" - operator: NotIn - values: - - fargate - matchExpressions: - key: 
"kubernetes.io/os" operator: In @@ -158,4 +164,7 @@ eniConfig: # c: # id: subnet-789 # securityGroups: - # - sg-789 \ No newline at end of file + # - sg-789 + +cri: + hostPath: # "/var/run/containerd/containerd.sock" \ No newline at end of file diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index b57fc56a..308ca8fa 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -25,7 +25,7 @@ charts: - name: aws-vpc-cni repo_name: aws dest: aws-bootstrap - version: 1.1.3 + version: 1.1.13 - name: aws-calico repo_name: aws dest: aws-bootstrap From afc1adc39aead3509e6b828a554de85ba50e975f Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 4 May 2022 17:17:58 +0200 Subject: [PATCH 108/122] chore: upgrade aws calico (#616) --- .../bootstrap/charts/aws-calico/Chart.yaml | 4 +- lib/aws/bootstrap/charts/aws-calico/README.md | 50 +++++++++++-------- .../aws-calico/templates/daemon-set.yaml | 22 ++++++++ .../aws-calico/templates/deployment.yaml | 30 ++++++++--- .../charts/aws-calico/templates/rbac.yaml | 8 +++ .../bootstrap/charts/aws-calico/values.yaml | 12 +++-- lib/helm-freeze.yaml | 2 +- .../aws/kubernetes/helm_charts.rs | 10 ++++ 8 files changed, 106 insertions(+), 32 deletions(-) diff --git a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml index 40ab5de7..f31cea34 100644 --- a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 -appVersion: 3.13.4 +appVersion: 3.19.1 description: A Helm chart for installing Calico on AWS icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png name: aws-calico -version: 0.3.1 +version: 0.3.10 diff --git a/lib/aws/bootstrap/charts/aws-calico/README.md b/lib/aws/bootstrap/charts/aws-calico/README.md index 9abbca69..b5fb89ca 100644 --- a/lib/aws/bootstrap/charts/aws-calico/README.md +++ b/lib/aws/bootstrap/charts/aws-calico/README.md @@ -1,7 +1,11 @@ # Calico on AWS 
+**Note**: The recommended way to install calico on EKS is via tigera-opeartor instead of this helm-chart. +You can follow https://docs.aws.amazon.com/eks/latest/userguide/calico.html for detailed instructions. + This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html + ## Prerequisites - Kubernetes 1.11+ running on AWS @@ -38,26 +42,32 @@ If you receive an error similar to `Error: release aws-calico failed: The following table lists the configurable parameters for this chart and their default values. -| Parameter | Description | Default | -|----------------------------------------|---------------------------------------------------------|---------------------------------| -| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | -| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | -| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | -| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | -| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | -| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | -| `calico.node.logseverity` | Calico Node Log Severity | `Info` | -| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | -| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.tag` | Calico version | `v3.8.1` | -| `fullnameOverride` | Override the fullname of the chart | `calico` | -| `podSecurityPolicy.create` | Specifies whether 
podSecurityPolicy and related rbac objects should be created | `false` | -| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | -| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | +| Parameter | Description | Default | +|------------------------------------------|---------------------------------------------------------|---------------------------------| +| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | +| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | +| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | +| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.typha.podAnnotations` | Calico Typha Node Pod Annotations | `{}` | +| `calico.typha.podLabels` | Calico Typha Node Pod Labels | `{}` | +| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | +| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | +| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | +| `calico.node.logseverity` | Calico Node Log Severity | `Info` | +| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.node.podAnnotations` | Calico Node Pod Annotations | `{}` | +| `calico.node.podLabels` | Calico Node Pod Labels | `{}` | +| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | +| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | +| 
`calico.typha_autoscaler.podAnnotations` | Calico Typha Autoscaler Pod Annotations | `{}` | +| `calico.typha_autoscaler.podLabels` | Calico Typha Autoscaler Pod Labels | `{}` | +| `calico.tag` | Calico version | `v3.8.1` | +| `fullnameOverride` | Override the fullname of the chart | `calico` | +| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | +| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml index ce553146..0cc7742c 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml @@ -9,6 +9,9 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + {{- if .Values.calico.node.podLabels }} +{{ toYaml .Values.calico.node.podLabels | indent 6 }} + {{- end }} updateStrategy: type: RollingUpdate rollingUpdate: @@ -17,8 +20,23 @@ spec: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + {{- if .Values.calico.node.podLabels }} +{{ toYaml .Values.calico.node.podLabels | indent 8 }} + {{- end }} + {{- with .Values.calico.node.podAnnotations }} + annotations: {{- toYaml . 
| nindent 8 }} + {{- end }} spec: priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate nodeSelector: {{- toYaml .Values.calico.node.nodeSelector | nindent 8 }} hostNetwork: true @@ -70,6 +88,8 @@ spec: value: "none" - name: FELIX_PROMETHEUSMETRICSENABLED value: "true" + - name: FELIX_ROUTESOURCE + value: "WorkloadIPs" - name: NO_DEFAULT_POOLS value: "true" # Set based on the k8s node name. @@ -95,12 +115,14 @@ spec: periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 + timeoutSeconds: 5 readinessProbe: exec: command: - /bin/calico-node - -felix-ready periodSeconds: 10 + timeoutSeconds: 5 resources: {{- toYaml .Values.calico.node.resources | nindent 12 }} volumeMounts: diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml index a879a8d2..360d4a87 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml @@ -10,12 +10,21 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + {{- if .Values.calico.typha.podLabels }} +{{ toYaml .Values.calico.typha.podLabels | indent 6 }} + {{- end }} template: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + {{- if .Values.calico.typha.podLabels }} +{{ toYaml .Values.calico.typha.podLabels | indent 8 }} + {{- end }} annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + {{- with .Values.calico.typha.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: priorityClassName: system-cluster-critical nodeSelector: @@ -24,9 +33,9 @@ spec: # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly operator: Exists - {{- if .Values.calico.typha.tolerations }} -{{ toYaml .Values.calico.typha.tolerations | indent 10 }} - {{- end }} + {{- if .Values.calico.typha.tolerations }} + {{- toYaml .Values.calico.typha.tolerations | nindent 8 }} + {{- end }} hostNetwork: true serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 @@ -97,11 +106,20 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + {{- if .Values.calico.typha_autoscaler.podLabels }} +{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 6 }} + {{- end }} replicas: 1 template: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + {{- if .Values.calico.typha_autoscaler.podLabels }} +{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 8 }} + {{- end }} + {{- with .Values.calico.typha_autoscaler.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} spec: priorityClassName: system-cluster-critical nodeSelector: @@ -110,9 +128,9 @@ spec: # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly operator: Exists - {{- if .Values.calico.typha_autoscaler.tolerations }} -{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }} - {{- end }} + {{- if .Values.calico.typha_autoscaler.tolerations }} + {{- toYaml .Values.calico.typha_autoscaler.tolerations | nindent 8 }} + {{- end }} containers: - image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}" name: autoscaler diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml index 7caa7fa4..64538d7d 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml @@ -15,6 +15,14 @@ rules: - configmaps verbs: - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list - apiGroups: [""] resources: - endpoints diff --git a/lib/aws/bootstrap/charts/aws-calico/values.yaml b/lib/aws/bootstrap/charts/aws-calico/values.yaml index c192e92e..26c1da48 100644 --- a/lib/aws/bootstrap/charts/aws-calico/values.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/values.yaml @@ -7,7 +7,7 @@ podSecurityPolicy: create: false calico: - tag: v3.13.4 + tag: v3.19.1 typha: logseverity: Info #Debug, Info, Warning, Error, Fatal @@ -22,6 +22,8 @@ calico: tolerations: [] nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} node: logseverity: Info #Debug, Info, Warning, Error, Fatal image: quay.io/calico/node @@ -37,6 +39,8 @@ calico: # value: 'some value' nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} typha_autoscaler: resources: requests: @@ -48,7 +52,9 @@ calico: tolerations: [] nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} autoscaler: - tag: "1.7.1" - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.8.3" + image: 
k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64 diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 308ca8fa..1babc180 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -29,7 +29,7 @@ charts: - name: aws-calico repo_name: aws dest: aws-bootstrap - version: 0.3.1 + version: 0.3.10 - name: cluster-autoscaler repo_name: cluster-autoscaler version: 9.10.4 diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index e4595d2e..4908df10 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -249,6 +249,16 @@ pub fn aws_helm_charts( chart_info: ChartInfo { name: "calico".to_string(), path: chart_path("charts/aws-calico"), + values: vec![ + ChartSetValue { + key: "calico.node.resources.limits.memory".to_string(), + value: "128Mi".to_string(), + }, + ChartSetValue { + key: "calico.node.resources.requests.memory".to_string(), + value: "128Mi".to_string(), + }, + ], ..Default::default() }, }; From 08ac38b95a48cb4f1f2975952a8c96cd435dd355 Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 4 May 2022 17:18:23 +0200 Subject: [PATCH 109/122] chore: upgrade aws node term handler (#617) --- .../aws-node-termination-handler/.helmignore | 1 + .../aws-node-termination-handler/Chart.yaml | 28 +- .../aws-node-termination-handler/README.md | 222 ++++++++----- .../templates/_helpers.tpl | 72 ++++- .../templates/clusterrole.yaml | 15 + .../templates/clusterrolebinding.yaml | 14 +- .../templates/daemonset.linux.yaml | 198 ++++++++++++ .../templates/daemonset.windows.yaml | 192 +++++++++++ .../templates/daemonset.yaml | 141 --------- .../templates/deployment.yaml | 202 ++++++++++++ .../templates/pdb.yaml | 13 + .../templates/podmonitor.yaml | 31 ++ .../templates/psp.yaml | 25 +- .../templates/service.yaml | 17 + .../templates/serviceaccount.yaml | 11 +- .../templates/servicemonitor.yaml | 31 ++ .../aws-node-termination-handler/values.yaml | 
299 ++++++++++++++---- lib/helm-freeze.yaml | 2 +- .../aws/kubernetes/helm_charts.rs | 1 + 19 files changed, 1196 insertions(+), 319 deletions(-) create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml delete mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml create mode 100644 lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore index 50af0317..69a52314 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore @@ -20,3 +20,4 @@ .idea/ *.tmproj .vscode/ +example-values*.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml index 47ed2baa..9de84506 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml @@ -1,27 +1,25 @@ -apiVersion: v1 -appVersion: 1.5.0 -description: A Helm chart for the AWS Node Termination Handler +apiVersion: v2 +appVersion: 1.14.1 +description: A Helm chart for the AWS Node Termination Handler. 
home: https://github.com/aws/eks-charts icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png keywords: +- aws - eks - ec2 - node-termination - spot +kubeVersion: '>= 1.16-0' maintainers: -- email: nckturner@users.noreply.github.com - name: Nicholas Turner - url: https://github.com/nckturner -- email: stefanprodan@users.noreply.github.com - name: Stefan Prodan - url: https://github.com/stefanprodan +- email: bwagner5@users.noreply.github.com + name: Brandon Wagner + url: https://github.com/bwagner5 - email: jillmon@users.noreply.github.com - name: Jillian Montalvo + name: Jillian Kuentz url: https://github.com/jillmon -- email: mattrandallbecker@users.noreply.github.com - name: Matthew Becker - url: https://github.com/mattrandallbecker name: aws-node-termination-handler sources: -- https://github.com/aws/eks-charts -version: 0.8.0 +- https://github.com/aws/aws-node-termination-handler/ +- https://github.com/aws/eks-charts/ +type: application +version: 0.16.1 diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md index f1847304..48766925 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md @@ -1,96 +1,170 @@ # AWS Node Termination Handler -AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler. +AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at [github.com/aws/aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler). 
## Prerequisites -* Kubernetes >= 1.11 +- _Kubernetes_ >= v1.16 ## Installing the Chart -Add the EKS repository to Helm: -```sh -helm repo add eks https://aws.github.io/eks-charts -``` -Install AWS Node Termination Handler: -To install the chart with the release name aws-node-termination-handler and default configuration: +Before you can install the chart you will need to add the `aws` repo to [Helm](https://helm.sh/). -```sh -helm install --name aws-node-termination-handler \ - --namespace kube-system eks/aws-node-termination-handler +```shell +helm repo add eks https://aws.github.io/eks-charts/ ``` -To install into an EKS cluster where the Node Termination Handler is already installed, you can run: +After you've installed the repo you can install the chart, the following command will install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace. -```sh -helm upgrade --install --recreate-pods --force \ - aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler +```shell +helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler ``` -If you receive an error similar to `Error: release aws-node-termination-handler -failed: "aws-node-termination-handler" already exists`, simply rerun -the above command. +To install the chart on an EKS cluster where the AWS Node Termination Handler is already installed, you can run the following command. -The [configuration](#configuration) section lists the parameters that can be configured during installation. 
-
-## Uninstalling the Chart
-
-To uninstall/delete the `aws-node-termination-handler` deployment:
-
-```sh
-helm delete --purge aws-node-termination-handler
+```shell
+helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler --recreate-pods --force
 ```
 
-The command removes all the Kubernetes components associated with the chart and deletes the release.
+If you receive an error similar to the one below simply rerun the above command.
+
+> Error: release aws-node-termination-handler failed: "aws-node-termination-handler" already exists
+
+To uninstall the `aws-node-termination-handler` chart installation from the `kube-system` namespace run the following command.
+
+```shell
+helm delete --namespace kube-system aws-node-termination-handler
+```
 
 ## Configuration
 
-The following tables lists the configurable parameters of the chart and their default values.
+The following tables list the configurable parameters of the chart and their default values. These values are split up into the [common configuration](#common-configuration) shared by all AWS Node Termination Handler modes, [queue configuration](#queue-processor-mode-configuration) used when AWS Node Termination Handler is in queue-processor mode, and [IMDS configuration](#imds-mode-configuration) used when AWS Node Termination Handler is in IMDS mode; for more information about the different modes see the project [README](https://github.com/aws/aws-node-termination-handler/blob/main/README.md).
 
-Parameter | Description | Default
---- | --- | ---
-`image.repository` | image repository | `amazon/aws-node-termination-handler`
-`image.tag` | image tag | ``
-`image.pullPolicy` | image pull policy | `IfNotPresent`
-`image.pullSecrets` | image pull secrets (for private docker registries) | `[]`
-`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). 
| `false` -`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` -`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` -`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` -`ignoreDaemonsSets` | Causes kubectl to skip daemon set managed pods | `true` -`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80` -`webhookURL` | Posts event data to URL upon instance interruption action | `` -`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | `` -`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}` -`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}` -`dryRun` | If true, only log if a node would be drained | `false` -`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false` -`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true` -`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3` -`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. 
| `false` -`taintNode` | If true, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false` -`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false` -`affinity` | node/pod affinities | None -`podAnnotations` | annotations to add to each pod | `{}` -`priorityClassName` | Name of the priorityClass | `system-node-critical` -`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi` -`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet` -`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. | `{}` -`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]` -`rbac.create` | if `true`, create and use RBAC resources | `true` -`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` -`serviceAccount.create` | If `true`, create a new service account | `true` -`serviceAccount.name` | Service account to be used | None -`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` -`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime` -`securityContext.runAsUserID` | User ID to run the container | `1000` -`securityContext.runAsGroupID` | Group ID to run the container | `1000` -`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os` -`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch` -`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false` -`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. 
| `9092` +### Common Configuration -## Metrics endpoint consideration -If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace -since it's listening on all interfaces. -Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. +The configuration in this table applies to all AWS Node Termination Handler modes. + +| Parameter | Description | Default | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| `image.repository` | Image repository. | `public.ecr.aws/aws-ec2/aws-node-termination-handler` | +| `image.tag` | Image tag. | `v{{ .Chart.AppVersion}}` | +| `image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets. | `[]` | +| `nameOverride` | Override the `name` of the chart. | `""` | +| `fullnameOverride` | Override the `fullname` of the chart. | `""` | +| `serviceAccount.create` | If `true`, create a new service account. | `true` | +| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` | +| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` | +| `rbac.create` | If `true`, create the RBAC resources. | `true` | +| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `true` | +| `customLabels` | Labels to add to all resource metadata. 
| `{}` | +| `podLabels` | Labels to add to the pod. | `{}` | +| `podAnnotations` | Annotations to add to the pod. | `{}` | +| `podSecurityContext` | Security context for the pod. | _See values.yaml_ | +| `securityContext` | Security context for the _aws-node-termination-handler_ container. | _See values.yaml_ | +| `terminationGracePeriodSeconds` | The termination grace period for the pod. | `nil` | +| `resources` | Resource requests and limits for the _aws-node-termination-handler_ container. | `{}` | +| `nodeSelector` | Expressions to select a node by it's labels for pod assignment. In IMDS mode this has a higher priority than `daemonsetNodeSelector` (for backwards compatibility) but shouldn't be used. | `{}` | +| `affinity` | Affinity settings for pod assignment. In IMDS mode this has a higher priority than `daemonsetAffinity` (for backwards compatibility) but shouldn't be used. | `{}` | +| `tolerations` | Tolerations for pod assignment. In IMDS mode this has a higher priority than `daemonsetTolerations` (for backwards compatibility) but shouldn't be used. | `[]` | +| `extraEnv` | Additional environment variables for the _aws-node-termination-handler_ container. | `[]` | +| `probes` | The Kubernetes liveness probe configuration. | _See values.yaml_ | +| `logLevel` | Sets the log level (`info`,`debug`, or `error`) | `info` | +| `jsonLogging` | If `true`, use JSON-formatted logs instead of human readable logs. | `false` | +| `enablePrometheusServer` | If `true`, start an http server exposing `/metrics` endpoint for _Prometheus_. | `false` | +| `prometheusServerPort` | Replaces the default HTTP port for exposing _Prometheus_ metrics. | `9092` | +| `dryRun` | If `true`, only log if a node would be drained. | `false` | +| `cordonOnly` | If `true`, nodes will be cordoned but not drained when an interruption event occurs. | `false` | +| `taintNode` | If `true`, nodes will be tainted when an interruption event occurs. 
Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance`, `aws-node-termination-handler/spot-itn`, `aws-node-termination-handler/asg-lifecycle-termination` and `aws-node-termination-handler/rebalance-recommendation`. | `false` | +| `deleteLocalData` | If `true`, continue even if there are pods using local data that will be deleted when the node is drained. | `true` | +| `ignoreDaemonSets` | If `true`, skip terminating daemon set managed pods. | `true` | +| `podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used, which defaults to 30 seconds if not specified for the pod. | `-1` | +| `nodeTerminationGracePeriod` | Period of time in seconds given to each node to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` | +| `emitKubernetesEvents` | If `true`, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event. More information [here](https://github.com/aws/aws-node-termination-handler/blob/main/docs/kubernetes_events.md). | `false` | +| `kubernetesEventsExtraAnnotations` | A comma-separated list of `key=value` extra annotations to attach to all emitted Kubernetes events (e.g. `first=annotation,sample.annotation/number=two"`). | `""` | +| `webhookURL` | Posts event data to URL upon instance interruption action. | `""` | +| `webhookURLSecretName` | Pass the webhook URL as a Secret using the key `webhookurl`. | `""` | +| `webhookHeaders` | Replace the default webhook headers (e.g. `{"Content-type":"application/json"}`). | `""` | +| `webhookProxy` | Uses the specified HTTP(S) proxy for sending webhook data. 
| `""` | +| `webhookTemplate` | Replaces the default webhook message template (e.g. `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Instance: {{ .InstanceID }} - Node: {{ .NodeName }} - Description: {{ .Description }} - Start Time: {{ .StartTime }}"}`). | `""` | +| `webhookTemplateConfigMapName` | Pass the webhook template file as a configmap. | "``" | +| `webhookTemplateConfigMapKey` | Name of the Configmap key storing the template file. | `""` | +| `enableSqsTerminationDraining` | If `true`, this turns on queue-processor mode which drains nodes when an SQS termination event is received. | `false` | + +### Queue-Processor Mode Configuration + +The configuration in this table applies to AWS Node Termination Handler in queue-processor mode. + +| Parameter | Description | Default | +| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | +| `replicas` | The number of replicas in the deployment when using queue-processor mode (NOTE: increasing replicas may cause duplicate webhooks since pods are stateless). | `1` | +| `strategy` | Specify the update strategy for the deployment. | `{}` | +| `podDisruptionBudget` | Limit the disruption for controller pods, requires at least 2 controller replicas. | `{}` | +| `serviceMonitor.create` | If `true`, create a ServiceMonitor. This requires `enablePrometheusServer: true`. | `false` | +| `serviceMonitor.namespace` | Override ServiceMonitor _Helm_ release namespace. | `nil` | +| `serviceMonitor.labels` | Additional ServiceMonitor metadata labels. | `{}` | +| `serviceMonitor.interval` | _Prometheus_ scrape interval. | `30s` | +| `serviceMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` | +| `priorityClassName` | Name of the PriorityClass to use for the Deployment. 
| `system-cluster-critical` | +| `awsRegion` | If specified, use the AWS region for AWS API calls, else NTH will try to find the region through the `AWS_REGION` environment variable, IMDS, or the specified queue URL. | `""` | +| `queueURL` | Listens for messages on the specified SQS queue URL. | `""` | +| `workers` | The maximum amount of parallel event processors to handle concurrent events. | `10` | +| `checkASGTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. | `true` | +| `managedAsgTag` | The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` | +| `assumeAsgTagPropagation` | If `true`, assume that ASG tags will be appear on the ASG's instances. | `false` | + +### IMDS Mode Configuration + +The configuration in this table applies to AWS Node Termination Handler in IMDS mode. + +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `targetNodeOs` | Space separated list of node OS's to target (e.g. `"linux"`, `"windows"`, `"linux windows"`). Windows support is **EXPERIMENTAL**. | `"linux"` | +| `linuxPodLabels` | Labels to add to each Linux pod. | `{}` | +| `windowsPodLabels` | Labels to add to each Windows pod. | `{}` | +| `linuxPodAnnotations` | Annotations to add to each Linux pod. | `{}` | +| `windowsPodAnnotations` | Annotations to add to each Windows pod. | `{}` | +| `updateStrategy` | Update strategy for the all DaemonSets. | _See values.yaml_ | +| `daemonsetPriorityClassName` | Name of the PriorityClass to use for all DaemonSets. | `system-node-critical` | +| `podMonitor.create` | If `true`, create a PodMonitor. 
This requires `enablePrometheusServer: true`. | `false` | +| `podMonitor.namespace` | Override PodMonitor _Helm_ release namespace. | `nil` | +| `podMonitor.labels` | Additional PodMonitor metadata labels | `{}` | +| `podMonitor.interval` | _Prometheus_ scrape interval. | `30s` | +| `podMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` | +| `useHostNetwork` | If `true`, enables `hostNetwork` for the Linux DaemonSet. NOTE: setting this to `false` may cause issues accessing IMDSv2 if your account is not configured with an IP hop count of 2 see [Metrics Endpoint Considerations](#metrics-endpoint-considerations) | `true` | +| `dnsPolicy` | If specified, this overrides `linuxDnsPolicy` and `windowsDnsPolicy` with a single policy. | `""` | +| `linuxDnsPolicy` | DNS policy for the Linux DaemonSet. | `""` | +| `windowsDnsPolicy` | DNS policy for the Windows DaemonSet. | `""` | +| `daemonsetNodeSelector` | Expressions to select a node by it's labels for DaemonSet pod assignment. For backwards compatibility the `nodeSelector` value has priority over this but shouldn't be used. | `{}` | +| `linuxNodeSelector` | Override `daemonsetNodeSelector` for the Linux DaemonSet. | `{}` | +| `windowsNodeSelector` | Override `daemonsetNodeSelector` for the Windows DaemonSet. | `{}` | +| `daemonsetAffinity` | Affinity settings for DaemonSet pod assignment. For backwards compatibility the `affinity` has priority over this but shouldn't be used. | `{}` | +| `linuxAffinity` | Override `daemonsetAffinity` for the Linux DaemonSet. | `{}` | +| `windowsAffinity` | Override `daemonsetAffinity` for the Windows DaemonSet. | `{}` | +| `daemonsetTolerations` | Tolerations for DaemonSet pod assignment. For backwards compatibility the `tolerations` has priority over this but shouldn't be used. | `[]` | +| `linuxTolerations` | Override `daemonsetTolerations` for the Linux DaemonSet. | `[]` | +| `windowsTolerations` | Override `daemonsetTolerations` for the Linux DaemonSet. 
| `[]` | +| `enableProbesServer` | If `true`, start an http server exposing `/healthz` endpoint for probes. | `false` | +| `metadataTries` | The number of times to try requesting metadata. | `3` | +| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. | `true` | +| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. This is **EXPERIMENTAL**. | `false` | +| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. | `false` | +| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. | `false` | + +### Testing Configuration + +The configuration in this table applies to AWS Node Termination Handler testing and is **NOT RECOMMENDED** FOR PRODUCTION DEPLOYMENTS. + +| Parameter | Description | Default | +| --------------------- | --------------------------------------------------------------------------------- | -------------- | +| `awsEndpoint` | (Used for testing) If specified, use the provided AWS endpoint to make API calls. | `""` | +| `awsSecretAccessKey` | (Used for testing) Pass-thru environment variable. | `nil` | +| `awsAccessKeyID` | (Used for testing) Pass-thru environment variable. | `nil` | +| `instanceMetadataURL` | (Used for testing) If specified, use the provided metadata URL. | `""` | +| `procUptimeFile` | (Used for Testing) Specify the uptime file. | `/proc/uptime` | + +## Metrics Endpoint Considerations + +AWS Node Termination HAndler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. 
Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. + +You can switch NTH in IMDS mode to run w/ `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html)). diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl index 902844a7..45f06f4b 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl @@ -1,4 +1,5 @@ {{/* vim: set filetype=mustache: */}} + {{/* Expand the name of the chart. */}} @@ -25,17 +26,11 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{/* -Common labels +Equivalent to "aws-node-termination-handler.fullname" except that "-win" indicator is appended to the end. +Name will not exceed 63 characters. */}} -{{- define "aws-node-termination-handler.labels" -}} -app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} -helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -k8s-app: aws-node-termination-handler -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- define "aws-node-termination-handler.fullnameWindows" -}} +{{- include "aws-node-termination-handler.fullname" . | trunc 59 | trimSuffix "-" | printf "%s-win" -}} {{- end -}} {{/* @@ -45,6 +40,47 @@ Create chart name and version as used by the chart label. 
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{/* +Common labels +*/}} +{{- define "aws-node-termination-handler.labels" -}} +{{ include "aws-node-termination-handler.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/component: {{ .Release.Name }} +app.kubernetes.io/part-of: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }} +{{- with .Values.customLabels }} +{{ toYaml . }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "aws-node-termination-handler.selectorLabels" -}} +app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Selector labels for the deployment +*/}} +{{- define "aws-node-termination-handler.selectorLabelsDeployment" -}} +{{ include "aws-node-termination-handler.selectorLabels" . }} +app.kubernetes.io/component: deployment +{{- end -}} + +{{/* +Selector labels for the daemonset +*/}} +{{- define "aws-node-termination-handler.selectorLabelsDaemonset" -}} +{{ include "aws-node-termination-handler.selectorLabels" . 
}} +app.kubernetes.io/component: daemonset +{{- end -}} + {{/* Create the name of the service account to use */}} @@ -55,3 +91,19 @@ Create the name of the service account to use {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} + +{{/* +The image to use +*/}} +{{- define "aws-node-termination-handler.image" -}} +{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} + +{{/* Get PodDisruptionBudget API Version */}} +{{- define "aws-node-termination-handler.pdb.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml index dc800866..43c2b030 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml @@ -1,7 +1,10 @@ +{{- if .Values.rbac.create -}} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . 
| nindent 4 }} rules: - apiGroups: - "" @@ -9,6 +12,7 @@ rules: - nodes verbs: - get + - list - patch - update - apiGroups: @@ -17,6 +21,7 @@ rules: - pods verbs: - list + - get - apiGroups: - "" resources: @@ -35,3 +40,13 @@ rules: - daemonsets verbs: - get +{{- if .Values.emitKubernetesEvents }} +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml index b5c25327..1058df1b 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml @@ -1,12 +1,16 @@ +{{- if .Values.rbac.create -}} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: {{ include "aws-node-termination-handler.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} roleRef: + apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: {{ include "aws-node-termination-handler.fullname" . }} - apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml new file mode 100644 index 00000000..199879c3 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml @@ -0,0 +1,198 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "linux") -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} + kubernetes.io/os: linux + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }} + kubernetes.io/os: linux + k8s-app: aws-node-termination-handler + {{- with (mergeOverwrite (dict) .Values.podLabels .Values.linuxPodLabels) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.linuxPodAnnotations }} + annotations: + {{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.linuxPodAnnotations) | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.daemonsetPriorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . 
}} + {{- end }} + hostNetwork: {{ .Values.useHostNetwork }} + dnsPolicy: {{ default .Values.linuxDnsPolicy .Values.dnsPolicy }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: {{ .Values.enableProbesServer | quote }} + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + {{- with .Values.instanceMetadataURL }} + - name: INSTANCE_METADATA_URL + value: {{ . 
| quote }} + {{- end }} + - name: METADATA_TRIES + value: {{ .Values.metadataTries | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . | quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . 
| quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: {{ .Values.enableSpotInterruptionDraining | quote }} + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: {{ .Values.enableScheduledEventDraining | quote }} + - name: ENABLE_REBALANCE_MONITORING + value: {{ .Values.enableRebalanceMonitoring | quote }} + - name: ENABLE_REBALANCE_DRAINING + value: {{ .Values.enableRebalanceDraining | quote }} + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "false" + - name: UPTIME_FROM_FILE + value: {{ .Values.procUptimeFile | quote }} + {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }} + ports: + {{- if .Values.enableProbesServer }} + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + {{- end }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + {{- end }} + {{- end }} + {{- if .Values.enableProbesServer }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: uptime + mountPath: {{ .Values.procUptimeFile }} + readOnly: true + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: webhook-template + mountPath: /config/ + {{- end }} + volumes: + - name: uptime + hostPath: + path: {{ .Values.procUptimeFile | default "/proc/uptime" }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.linuxNodeSelector) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.linuxAffinity) }} + affinity: + {{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.linuxAffinity)) | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.linuxTolerations) }} + tolerations: + {{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.linuxTolerations )) | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml new file mode 100644 index 00000000..ea7f8337 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml @@ -0,0 +1,192 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "windows") -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullnameWindows" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . 
| nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} + kubernetes.io/os: windows + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }} + kubernetes.io/os: windows + k8s-app: aws-node-termination-handler + {{- with (mergeOverwrite (dict) .Values.podLabels .Values.windowsPodLabels) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.windowsPodAnnotations }} + annotations: + {{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.windowsPodAnnotations) | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.daemonsetPriorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + hostNetwork: false + dnsPolicy: {{ default .Values.windowsDnsPolicy .Values.dnsPolicy }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: {{ .Values.enableProbesServer | quote }} + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + {{- with .Values.instanceMetadataURL }} + - name: INSTANCE_METADATA_URL + value: {{ . | quote }} + {{- end }} + - name: METADATA_TRIES + value: {{ .Values.metadataTries | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . 
| quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . | quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: {{ .Values.enableSpotInterruptionDraining | quote }} + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: {{ .Values.enableScheduledEventDraining | quote }} + - name: ENABLE_REBALANCE_MONITORING + value: {{ .Values.enableRebalanceMonitoring | quote }} + - name: ENABLE_REBALANCE_DRAINING + value: {{ .Values.enableRebalanceDraining | quote }} + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "false" + {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }} + ports: + {{- if .Values.enableProbesServer }} + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + hostPort: {{ .Values.probes.httpGet.port }} + {{- end }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + hostPort: {{ .Values.prometheusServerPort }} + {{- end }} + {{- end }} + {{- if .Values.enableProbesServer }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumeMounts: + - name: webhook-template + mountPath: /config/ + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumes: + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: windows + {{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.windowsNodeSelector) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.windowsAffinity) }} + affinity: + {{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.windowsAffinity )) | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.windowsTolerations) }} + tolerations: + {{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.windowsTolerations )) | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml deleted file mode 100644 index fb220022..00000000 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ include "aws-node-termination-handler.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} -spec: - updateStrategy: -{{ toYaml .Values.updateStrategy | indent 4 }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - {{- if .Values.podAnnotations }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - labels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - k8s-app: aws-node-termination-handler - spec: - volumes: - - name: "uptime" - hostPath: - path: "{{ .Values.procUptimeFile }}" - priorityClassName: "{{ .Values.priorityClassName }}" - affinity: - nodeAffinity: - # NOTE(jaypipes): Change when we complete - # https://github.com/aws/aws-node-termination-handler/issues/8 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }} - operator: In - values: - - linux - - key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }} - operator: In - values: - - amd64 - - arm - - arm64 - serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }} - hostNetwork: true - dnsPolicy: {{ .Values.dnsPolicy }} - containers: - - name: {{ include "aws-node-termination-handler.name" . 
}} - image: {{ .Values.image.repository}}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - securityContext: - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: {{ .Values.securityContext.runAsUserID }} - runAsGroup: {{ .Values.securityContext.runAsGroupID }} - allowPrivilegeEscalation: false - volumeMounts: - - name: "uptime" - mountPath: "/proc/uptime" - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SPOT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: DELETE_LOCAL_DATA - value: {{ .Values.deleteLocalData | quote }} - - name: IGNORE_DAEMON_SETS - value: {{ .Values.ignoreDaemonSets | quote }} - - name: GRACE_PERIOD - value: {{ .Values.gracePeriod | quote }} - - name: POD_TERMINATION_GRACE_PERIOD - value: {{ .Values.podTerminationGracePeriod | quote }} - - name: INSTANCE_METADATA_URL - value: {{ .Values.instanceMetadataURL | quote }} - - name: NODE_TERMINATION_GRACE_PERIOD - value: {{ .Values.nodeTerminationGracePeriod | quote }} - - name: WEBHOOK_URL - value: {{ .Values.webhookURL | quote }} - - name: WEBHOOK_HEADERS - value: {{ .Values.webhookHeaders | quote }} - - name: WEBHOOK_TEMPLATE - value: {{ .Values.webhookTemplate | quote }} - - name: DRY_RUN - value: {{ .Values.dryRun | quote }} - - name: ENABLE_SPOT_INTERRUPTION_DRAINING - value: {{ .Values.enableSpotInterruptionDraining | quote }} - - name: ENABLE_SCHEDULED_EVENT_DRAINING - value: {{ .Values.enableScheduledEventDraining | quote }} - - name: METADATA_TRIES - value: {{ .Values.metadataTries | quote }} - - name: CORDON_ONLY - value: {{ .Values.cordonOnly | quote }} - - name: TAINT_NODE - value: {{ .Values.taintNode | quote }} - - name: JSON_LOGGING - value: {{ .Values.jsonLogging | quote }} - - name: WEBHOOK_PROXY - value: {{ 
.Values.webhookProxy | quote }} - - name: ENABLE_PROMETHEUS_SERVER - value: {{ .Values.enablePrometheusServer | quote }} - - name: PROMETHEUS_SERVER_PORT - value: {{ .Values.prometheusServerPort | quote }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml new file mode 100644 index 00000000..38c10e98 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml @@ -0,0 +1,202 @@ +{{- if .Values.enableSqsTerminationDraining }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicas }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 8 }} + k8s-app: aws-node-termination-handler + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: "true" + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + - name: CHECK_ASG_TAG_BEFORE_DRAINING + value: {{ .Values.checkASGTagBeforeDraining | quote }} + - name: MANAGED_ASG_TAG + value: {{ .Values.managedAsgTag | quote }} + - name: ASSUME_ASG_TAG_PROPAGATION + value: {{ .Values.assumeAsgTagPropagation | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | 
quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . | quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . | quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: "false" + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: "false" + - name: ENABLE_REBALANCE_MONITORING + value: "false" + - name: ENABLE_REBALANCE_DRAINING + value: "false" + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "true" + {{- with .Values.awsRegion }} + - name: AWS_REGION + value: {{ . | quote }} + {{- end }} + {{- with .Values.awsEndpoint }} + - name: AWS_ENDPOINT + value: {{ . 
| quote }} + {{- end }} + {{- if and .Values.awsAccessKeyID .Values.awsSecretAccessKey }} + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.awsAccessKeyID | quote }} + - name: AWS_SECRET_ACCESS_KEY + value: {{ .Values.awsSecretAccessKey | quote }} + {{- end }} + - name: QUEUE_URL + value: {{ .Values.queueURL | quote }} + - name: WORKERS + value: {{ .Values.workers | quote }} + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + {{- end }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumeMounts: + - name: webhook-template + mountPath: /config/ + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumes: + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml new file mode 100644 index 00000000..a2564fc5 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.enableSqsTerminationDraining (and .Values.podDisruptionBudget (gt (int .Values.replicas) 1)) }} +apiVersion: {{ include "aws-node-termination-handler.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml new file mode 100644 index 00000000..bbcbd9b4 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml @@ -0,0 +1,31 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (and .Values.enablePrometheusServer .Values.podMonitor.create) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }} + {{- if .Values.podMonitor.namespace }} + namespace: {{ .Values.podMonitor.namespace }} + {{- end }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.podMonitor.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + jobLabel: app.kubernetes.io/name + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + podMetricsEndpoints: + - port: http-metrics + path: /metrics + {{- with .Values.podMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.podMonitor.sampleLimit }} + sampleLimit: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml index 0eda5002..e0034c1f 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml @@ -4,14 +4,25 @@ kind: PodSecurityPolicy metadata: name: {{ template "aws-node-termination-handler.fullname" . }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' spec: privileged: false hostIPC: false - hostNetwork: true + hostNetwork: {{ .Values.useHostNetwork }} hostPID: false +{{- if and (and (not .Values.enableSqsTerminationDraining) .Values.useHostNetwork ) (or .Values.enablePrometheusServer .Values.enableProbesServer) }} + hostPorts: +{{- if .Values.enablePrometheusServer }} + - min: {{ .Values.prometheusServerPort }} + max: {{ .Values.prometheusServerPort }} +{{- end }} +{{- if .Values.enableProbesServer }} + - min: {{ .Values.probesServerPort }} + max: {{ .Values.probesServerPort }} +{{- end }} +{{- end }} readOnlyRootFilesystem: false allowPrivilegeEscalation: false allowedCapabilities: @@ -27,12 +38,13 @@ spec: volumes: - '*' --- -kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: name: {{ template "aws-node-termination-handler.fullname" . 
}}-psp + namespace: {{ .Release.Namespace }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} rules: - apiGroups: ['policy'] resources: ['podsecuritypolicies'] @@ -44,11 +56,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "aws-node-termination-handler.fullname" . }}-psp + namespace: {{ .Release.Namespace }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole + kind: Role name: {{ template "aws-node-termination-handler.fullname" . }}-psp subjects: - kind: ServiceAccount diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml new file mode 100644 index 00000000..869e2606 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.enableSqsTerminationDraining .Values.enablePrometheusServer -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . 
| nindent 4 }} + ports: + - name: http-metrics + port: {{ .Values.prometheusServerPort }} + targetPort: http-metrics + protocol: TCP +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml index 55f2d766..a83276d6 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml @@ -3,11 +3,10 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} {{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml new file mode 100644 index 00000000..caee5051 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.enableSqsTerminationDraining (and .Values.enablePrometheusServer .Values.serviceMonitor.create) -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + jobLabel: app.kubernetes.io/name + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http-metrics + path: /metrics + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.sampleLimit }} + sampleLimit: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml index 469a51e4..a49c7d62 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml @@ -3,100 +3,277 @@ # Declare variables to be passed into your templates. image: - repository: amazon/aws-node-termination-handler - tag: v1.5.0 + repository: public.ecr.aws/aws-ec2/aws-node-termination-handler + # Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }} + tag: "" pullPolicy: IfNotPresent pullSecrets: [] -securityContext: - runAsUserID: 1000 - runAsGroupID: 1000 - nameOverride: "" fullnameOverride: "" -priorityClassName: system-node-critical +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
If namenot set and create is true, a name is generated using fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Specifies if PodSecurityPolicy resources should be created + pspEnabled: true + +customLabels: {} + +podLabels: {} podAnnotations: {} -resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" +podSecurityContext: + fsGroup: 1000 -## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is receieved -enableSpotInterruptionDraining: "" +securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 -## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event -enableScheduledEventDraining: "" +terminationGracePeriodSeconds: -taintNode: false +resources: {} -## dryRun tells node-termination-handler to only log calls to kubernetes control plane +nodeSelector: {} + +affinity: {} + +tolerations: [] + +# Extra environment variables +extraEnv: [] + +# Liveness probe settings +probes: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + +# Set the log level +logLevel: info + +# Log messages in JSON format +jsonLogging: false + +enablePrometheusServer: false +prometheusServerPort: 9092 + +# dryRun tells node-termination-handler to only log calls to kubernetes control plane dryRun: false +# Cordon but do not drain nodes upon spot interruption termination notice. +cordonOnly: false + +# Taint node upon spot interruption termination notice. +taintNode: false + # deleteLocalData tells kubectl to continue even if there are pods using # emptyDir (local data that will be deleted when the node is drained). 
-deleteLocalData: "" +deleteLocalData: true # ignoreDaemonSets causes kubectl to skip Daemon Set managed pods. -ignoreDaemonSets: "" +ignoreDaemonSets: true -# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully. -# If negative, the default value specified in the pod will be used. -gracePeriod: "" -podTerminationGracePeriod: "" +# podTerminationGracePeriod is time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. +podTerminationGracePeriod: -1 # nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. -nodeTerminationGracePeriod: "" +nodeTerminationGracePeriod: 120 + +# emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event +emitKubernetesEvents: false + +# kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events +# Example: "first=annotation,sample.annotation/number=two" +kubernetesEventsExtraAnnotations: "" # webhookURL if specified, posts event data to URL upon instance interruption action. webhookURL: "" -# webhookProxy if specified, uses this HTTP(S) proxy configuration. -webhookProxy: "" +# Webhook URL will be fetched from the secret store using the given name. +webhookURLSecretName: "" # webhookHeaders if specified, replaces the default webhook headers. webhookHeaders: "" +# webhookProxy if specified, uses this HTTP(S) proxy configuration. 
+webhookProxy: "" + # webhookTemplate if specified, replaces the default webhook message template. webhookTemplate: "" -# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80) +# webhook template file will be fetched from given config map name +# if specified, replaces the default webhook message with the content of the template file +webhookTemplateConfigMapName: "" + +# template file name stored in configmap +webhookTemplateConfigMapKey: "" + +# enableSqsTerminationDraining If true, this turns on queue-processor mode which drains nodes when an SQS termination event is received +enableSqsTerminationDraining: false + +# --------------------------------------------------------------------------------------------------------------------- +# Queue Processor Mode +# --------------------------------------------------------------------------------------------------------------------- + +# The number of replicas in the NTH deployment when using queue-processor mode (NOTE: increasing this may cause duplicate webhooks since NTH pods are stateless) +replicas: 1 + +# Specify the update strategy for the deployment +strategy: {} + +# podDisruptionBudget specifies the disruption budget for the controller pods. 
+# Disruption budget will be configured only when the replicaCount is greater than 1 +podDisruptionBudget: {} +# maxUnavailable: 1 + +serviceMonitor: + # Specifies whether ServiceMonitor should be created + # this needs enableSqsTerminationDraining: true + # and enablePrometheusServer: true + create: false + # Specifies whether the ServiceMonitor should be created in a different namespace than + # the Helm release + namespace: + # Additional labels to add to the metadata + labels: {} + # The Prometheus scrape interval + interval: 30s + # The number of scraped samples that will be accepted + sampleLimit: 5000 + +priorityClassName: system-cluster-critical + +# If specified, use the AWS region for AWS API calls +awsRegion: "" + +# Listens for messages on the specified SQS queue URL +queueURL: "" + +# The maximum amount of parallel event processors to handle concurrent events +workers: 10 + +# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node +checkASGTagBeforeDraining: true + +# The tag to ensure is on a node if checkASGTagBeforeDraining is true +managedAsgTag: "aws-node-termination-handler/managed" + +# If true, assume that ASG tags will be appear on the ASG's instances +assumeAsgTagPropagation: false + +# --------------------------------------------------------------------------------------------------------------------- +# IMDS Mode +# --------------------------------------------------------------------------------------------------------------------- + +# Create node OS specific daemonset(s). (e.g. "linux", "windows", "linux windows") +targetNodeOs: linux + +linuxPodLabels: {} +windowsPodLabels: {} + +linuxPodAnnotations: {} +windowsPodAnnotations: {} + +# K8s DaemonSet update strategy. 
+updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + +daemonsetPriorityClassName: system-node-critical + +podMonitor: + # Specifies whether PodMonitor should be created + # this needs enableSqsTerminationDraining: false + # and enablePrometheusServer: true + create: false + # Specifies whether the PodMonitor should be created in a different namespace than + # the Helm release + namespace: + # Additional labels to add to the metadata + labels: {} + # The Prometheus scrape interval + interval: 30s + # The number of scraped samples that will be accepted + sampleLimit: 5000 + +# Determines if NTH uses host networking for Linux when running the DaemonSet (only IMDS mode; queue-processor never runs with host networking) +# If you have disabled IMDSv1 and are relying on IMDSv2, you'll need to increase the IP hop count to 2 before switching this to false +# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html +useHostNetwork: true + +# Daemonset DNS policy +dnsPolicy: "" +linuxDnsPolicy: ClusterFirstWithHostNet +windowsDnsPolicy: ClusterFirst + +daemonsetNodeSelector: {} +linuxNodeSelector: {} +windowsNodeSelector: {} + +daemonsetAffinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate +linuxAffinity: {} +windowsAffinity: {} + +daemonsetTolerations: + - operator: Exists +linuxTolerations: [] +windowsTolerations: [] + +# If the probes server is running for the Daemonset +enableProbesServer: false + +# Total number of times to try making the metadata request before failing. 
+metadataTries: 3 + +# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received +enableSpotInterruptionDraining: true + +# enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event +enableScheduledEventDraining: false + +# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received +enableRebalanceMonitoring: false + +# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received +enableRebalanceDraining: false + +# --------------------------------------------------------------------------------------------------------------------- +# Testing +# --------------------------------------------------------------------------------------------------------------------- + +# (TESTING USE): If specified, use the provided AWS endpoint to make API calls. +awsEndpoint: "" + +# (TESTING USE): These should only be used for testing w/ localstack! +awsAccessKeyID: +awsSecretAccessKey: + +# (TESTING USE): Override the default metadata URL (default: http://169.254.169.254:80) instanceMetadataURL: "" # (TESTING USE): Mount path for uptime file -procUptimeFile: "/proc/uptime" - -# nodeSelector tells the daemonset where to place the node-termination-handler -# pods. By default, this value is empty and every node will receive a pod. -nodeSelector: {} - -nodeSelectorTermsOs: "" -nodeSelectorTermsArch: "" - -enablePrometheusServer: false -prometheusServerPort: "9092" - -tolerations: - - operator: "Exists" - -affinity: {} - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. 
If namenot set and create is true, - # a name is generated using fullname template - name: - annotations: {} - # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME - -rbac: - # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created - pspEnabled: true - -dnsPolicy: "ClusterFirstWithHostNet" +procUptimeFile: /proc/uptime diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 1babc180..c55f287e 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -21,7 +21,7 @@ charts: - name: aws-node-termination-handler repo_name: aws dest: aws-bootstrap - version: 0.8.0 + version: 0.16.1 - name: aws-vpc-cni repo_name: aws dest: aws-bootstrap diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 4908df10..59d919a5 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -205,6 +205,7 @@ pub fn aws_helm_charts( let aws_node_term_handler = CommonChart { chart_info: ChartInfo { name: "aws-node-term-handler".to_string(), + last_breaking_version_requiring_restart: Some(Version::new(0, 16, 1)), path: chart_path("charts/aws-node-termination-handler"), values: vec![ ChartSetValue { From 1bd7db050476c79d27f717c9354205b64cc0f8da Mon Sep 17 00:00:00 2001 From: MacLikorne Date: Wed, 4 May 2022 17:18:53 +0200 Subject: [PATCH 110/122] chore(ENG_1123_update_cert_manager): update chart manager (#623) --- Cargo.lock | 22 + Cargo.toml | 2 + .../bootstrap/charts/cert-manager/Chart.yaml | 4 +- .../bootstrap/charts/cert-manager/README.md | 24 +- .../templates/cainjector-deployment.yaml | 13 +- .../cert-manager/templates/crds.legacy.yaml | 6257 ----------------- .../charts/cert-manager/templates/crds.yaml | 172 +- .../cert-manager/templates/deployment.yaml | 13 +- .../charts/cert-manager/templates/rbac.yaml | 6 +- .../templates/webhook-mutating-webhook.yaml | 15 - .../templates/webhook-validating-webhook.yaml | 16 - 
.../bootstrap/charts/cert-manager/values.yaml | 15 + lib/helm-freeze.yaml | 2 +- .../aws/kubernetes/helm_charts.rs | 1 + .../digitalocean/kubernetes/helm_charts.rs | 1 + src/cloud_provider/helm.rs | 65 +- .../scaleway/kubernetes/helm_charts.rs | 1 + src/cmd/helm.rs | 202 +- src/cmd/helm_utils.rs | 411 ++ src/cmd/kubectl.rs | 122 + src/cmd/mod.rs | 1 + src/cmd/structs.rs | 1 + src/fs.rs | 334 +- tests/helm/cert_manager.rs | 348 + tests/helm/mod.rs | 1 + tests/lib.rs | 1 + 26 files changed, 1571 insertions(+), 6479 deletions(-) delete mode 100644 lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml create mode 100644 src/cmd/helm_utils.rs create mode 100644 tests/helm/cert_manager.rs create mode 100644 tests/helm/mod.rs diff --git a/Cargo.lock b/Cargo.lock index e9711f16..33b07210 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2115,6 +2115,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_yaml", "strum", "strum_macros", "sysinfo", @@ -2965,6 +2966,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +dependencies = [ + "indexmap", + "ryu", + "serde", + "yaml-rust", +] + [[package]] name = "sha-1" version = "0.8.2" @@ -4211,6 +4224,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.4.1" diff --git a/Cargo.toml b/Cargo.toml index 5ae0c0e7..d1272877 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,8 @@ tera = "1.10.0" serde = "1.0.126" serde_json = "1.0.64" 
serde_derive = "1.0.126" +serde_yaml = "0.8.23" + # AWS deps tokio = { version = "1.10.0", features = ["full"] } rusoto_core = "0.47.0" diff --git a/lib/common/bootstrap/charts/cert-manager/Chart.yaml b/lib/common/bootstrap/charts/cert-manager/Chart.yaml index b91d8ee1..01c64ecf 100644 --- a/lib/common/bootstrap/charts/cert-manager/Chart.yaml +++ b/lib/common/bootstrap/charts/cert-manager/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.1.1 +appVersion: v1.2.0 description: A Helm chart for cert-manager home: https://github.com/jetstack/cert-manager icon: https://raw.githubusercontent.com/jetstack/cert-manager/master/logo/logo.png @@ -14,4 +14,4 @@ maintainers: name: cert-manager sources: - https://github.com/jetstack/cert-manager -version: v1.1.1 +version: v1.2.0 diff --git a/lib/common/bootstrap/charts/cert-manager/README.md b/lib/common/bootstrap/charts/cert-manager/README.md index ec353bba..16e55a87 100644 --- a/lib/common/bootstrap/charts/cert-manager/README.md +++ b/lib/common/bootstrap/charts/cert-manager/README.md @@ -19,16 +19,9 @@ Before installing the chart, you must first install the cert-manager CustomResou This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources. ```bash -# Kubernetes 1.15+ -$ kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager.crds.yaml - -# Kubernetes <1.15 -$ kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager-legacy.crds.yaml +$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml ``` -> **Note**: If you're using a Kubernetes version below `v1.15` you will need to install the legacy version of the custom resource definitions. -> This version does not have API version conversion enabled and only supports `cert-manager.io/v1` API resources. 
- To install the chart with the release name `my-release`: ```console @@ -72,11 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als delete the previously installed CustomResourceDefinition resources: ```console -# Kubernetes 1.15+ -$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager.crds.yaml - -# Kubernetes <1.15 -$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager-legacy.crds.yaml +$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml ``` ## Configuration @@ -91,9 +80,12 @@ The following table lists the configurable parameters of the cert-manager chart | `global.podSecurityPolicy.enabled` | If `true`, create and use PodSecurityPolicy (includes sub-charts) | `false` | | `global.podSecurityPolicy.useAppArmor` | If `true`, use Apparmor seccomp profile in PSP | `true` | | `global.leaderElection.namespace` | Override the namespace used to store the ConfigMap for leader election | `kube-system` | +| `global.leaderElection.leaseDuration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate | | +| `global.leaderElection.renewDeadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration | | +| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | | | `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. 
If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` | | `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` | -| `image.tag` | Image tag | `v1.1.1` | +| `image.tag` | Image tag | `v1.2.0` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `replicaCount` | Number of cert-manager replicas | `1` | | `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod | @@ -148,7 +140,7 @@ The following table lists the configurable parameters of the cert-manager chart | `webhook.affinity` | Node affinity for webhook pod assignment | `{}` | | `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` | | `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` | -| `webhook.image.tag` | Webhook image tag | `v1.1.1` | +| `webhook.image.tag` | Webhook image tag | `v1.2.0` | | `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` | | `webhook.securePort` | The port that the webhook should listen on for requests. 
| `10250` | | `webhook.securityContext` | Security context for webhook pod assignment | `{}` | @@ -178,7 +170,7 @@ The following table lists the configurable parameters of the cert-manager chart | `cainjector.affinity` | Node affinity for cainjector pod assignment | `{}` | | `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` | | `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` | -| `cainjector.image.tag` | cainjector image tag | `v1.1.1` | +| `cainjector.image.tag` | cainjector image tag | `v1.2.0` | | `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` | | `cainjector.securityContext` | Security context for cainjector pod assignment | `{}` | | `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | `{}` | diff --git a/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml b/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml index 8ac6da01..8944fb4f 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml @@ -61,7 +61,18 @@ spec: {{- if .Values.global.logLevel }} - --v={{ .Values.global.logLevel }} {{- end }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- with .Values.global.leaderElection }} + - --leader-election-namespace={{ .namespace }} + {{- if .leaseDuration }} + - --leader-election-lease-duration={{ .leaseDuration }} + {{- end }} + {{- if .renewDeadline }} + - --leader-election-renew-deadline={{ .renewDeadline }} + {{- end }} + {{- if .retryPeriod }} + - --leader-election-retry-period={{ .retryPeriod }} + {{- end }} + {{- end }} {{- if .Values.cainjector.extraArgs }} {{ toYaml .Values.cainjector.extraArgs | indent 10 }} {{- end }} diff --git 
a/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml b/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml deleted file mode 100644 index 4026fcee..00000000 --- a/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml +++ /dev/null @@ -1,6257 +0,0 @@ -{{- if (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) }} -{{- if .Values.installCRDs }} -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: certificaterequests.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: CertificateRequest - listKind: CertificateRequestList - plural: certificaterequests - shortNames: - - cr - - crs - singular: certificaterequest - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from - one of the configured issuers. 
\n All fields within the CertificateRequest's - `spec` are immutable after creation. A CertificateRequest will either succeed - or fail, as denoted by its `status.state` field. \n A CertificateRequest is - a 'one-shot' resource, meaning it represents a single point in time request - for a certificate and cannot be re-used." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the CertificateRequest resource. - properties: - duration: - description: The requested 'duration' (i.e. lifetime) of the Certificate. - This option may be ignored/overridden by some issuer types. - type: string - isCA: - description: IsCA will request to mark the certificate as valid for - certificate signing when submitting to the issuer. This will automatically - add the `cert sign` usage to the list of `usages`. - type: boolean - issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If - the 'kind' field is not set, or set to 'Issuer', an Issuer resource - with the given name in the same namespace as the CertificateRequest - will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer - with the provided name will be used. The 'name' field in this stanza - is required at all times. 
The group field refers to the API group - of the issuer which defaults to 'cert-manager.io' if empty. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - request: - description: The PEM-encoded x509 certificate signing request to be - submitted to the CA for signing. - format: byte - type: string - usages: - description: Usages is the set of x509 usages that are requested for - the certificate. If usages are set they SHOULD be encoded inside the - CSR spec Defaults to `digital signature` and `key encipherment` if - not specified. - items: - description: 'KeyUsage specifies valid usage contexts for keys. See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Valid KeyUsage values are as follows: "signing", "digital signature", - "content commitment", "key encipherment", "key agreement", "data - encipherment", "cert sign", "crl sign", "encipher only", "decipher - only", "any", "server auth", "client auth", "code signing", "email - protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec - user", "timestamping", "ocsp signing", "microsoft sgc", "netscape - sgc"' - enum: - - signing - - digital signature - - content commitment - - key encipherment - - key agreement - - data encipherment - - cert sign - - crl sign - - encipher only - - decipher only - - any - - server auth - - client auth - - code signing - - email protection - - s/mime - - ipsec end system - - ipsec tunnel - - ipsec user - - timestamping - - ocsp signing - - microsoft sgc - - netscape sgc - type: string - type: array - required: - - issuerRef - - request - type: object - status: - description: Status of the CertificateRequest. This is set and managed automatically. 
- properties: - ca: - description: The PEM encoded x509 certificate of the signer, also known - as the CA (Certificate Authority). This is set on a best-effort basis - by different issuers. If not set, the CA is assumed to be unknown/not - available. - format: byte - type: string - certificate: - description: The PEM encoded x509 certificate resulting from the certificate - signing request. If not set, the CertificateRequest has either not - been completed or has failed. More information on failure can be found - by checking the `conditions` field. - format: byte - type: string - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready` and `InvalidRequest`. - items: - description: CertificateRequestCondition contains condition information - for a CertificateRequest. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready', - 'InvalidRequest'). - type: string - required: - - status - - type - type: object - type: array - failureTime: - description: FailureTime stores the time that this CertificateRequest - failed. This is used to influence garbage collection and back-off. 
- format: date-time - type: string - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: certificates.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .spec.secretName - name: Secret - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: Certificate - listKind: CertificateList - plural: certificates - shortNames: - - cert - - certs - singular: certificate - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: "A Certificate resource should be created to ensure an up to date - and signed x509 certificate is stored in the Kubernetes Secret resource named - in `spec.secretName`. \n The stored certificate will be renewed before it - expires (as configured by `spec.renewBefore`)." 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the Certificate resource. - properties: - commonName: - description: 'CommonName is a common name to be used on the Certificate. - The CommonName should have a length of 64 characters or fewer to avoid - generating invalid CSRs. This value is ignored by TLS clients when - any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' - type: string - dnsNames: - description: DNSNames is a list of DNS subjectAltNames to be set on - the Certificate. - items: - type: string - type: array - duration: - description: The requested 'duration' (i.e. lifetime) of the Certificate. - This option may be ignored/overridden by some issuer types. If overridden - and `renewBefore` is greater than the actual certificate duration, - the certificate will be automatically renewed 2/3rds of the way through - the certificate's duration. - type: string - emailAddresses: - description: EmailAddresses is a list of email subjectAltNames to be - set on the Certificate. 
- items: - type: string - type: array - encodeUsagesInRequest: - description: EncodeUsagesInRequest controls whether key usages should - be present in the CertificateRequest - type: boolean - ipAddresses: - description: IPAddresses is a list of IP address subjectAltNames to - be set on the Certificate. - items: - type: string - type: array - isCA: - description: IsCA will mark this Certificate as valid for certificate - signing. This will automatically add the `cert sign` usage to the - list of `usages`. - type: boolean - issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. - If the 'kind' field is not set, or set to 'Issuer', an Issuer resource - with the given name in the same namespace as the Certificate will - be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer - with the provided name will be used. The 'name' field in this stanza - is required at all times. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - keystores: - description: Keystores configures additional keystore output formats - stored in the `secretName` Secret resource. - properties: - jks: - description: JKS configures options for storing a JKS keystore in - the `spec.secretName` Secret resource. - properties: - create: - description: Create enables JKS keystore creation for the Certificate. - If true, a file named `keystore.jks` will be created in the - target Secret resource, encrypted using the password stored - in `passwordSecretRef`. The keystore file will only be updated - upon re-issuance. - type: boolean - passwordSecretRef: - description: PasswordSecretRef is a reference to a key in a - Secret resource containing the password used to encrypt the - JKS keystore. 
- properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - create - - passwordSecretRef - type: object - pkcs12: - description: PKCS12 configures options for storing a PKCS12 keystore - in the `spec.secretName` Secret resource. - properties: - create: - description: Create enables PKCS12 keystore creation for the - Certificate. If true, a file named `keystore.p12` will be - created in the target Secret resource, encrypted using the - password stored in `passwordSecretRef`. The keystore file - will only be updated upon re-issuance. - type: boolean - passwordSecretRef: - description: PasswordSecretRef is a reference to a key in a - Secret resource containing the password used to encrypt the - PKCS12 keystore. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - create - - passwordSecretRef - type: object - type: object - privateKey: - description: Options to control private keys used for the Certificate. - properties: - algorithm: - description: Algorithm is the private key algorithm of the corresponding - private key for this certificate. 
If provided, allowed values - are either "rsa" or "ecdsa" If `algorithm` is specified and `size` - is not provided, key size of 256 will be used for "ecdsa" key - algorithm and key size of 2048 will be used for "rsa" key algorithm. - enum: - - RSA - - ECDSA - type: string - encoding: - description: The private key cryptography standards (PKCS) encoding - for this certificate's private key to be encoded in. If provided, - allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and - PKCS#8, respectively. Defaults to PKCS#1 if not specified. - enum: - - PKCS1 - - PKCS8 - type: string - rotationPolicy: - description: RotationPolicy controls how private keys should be - regenerated when a re-issuance is being processed. If set to Never, - a private key will only be generated if one does not already exist - in the target `spec.secretName`. If one does exists but it does - not have the correct algorithm or size, a warning will be raised - to await user intervention. If set to Always, a private key matching - the specified requirements will be generated whenever a re-issuance - occurs. Default is 'Never' for backward compatibility. - type: string - size: - description: Size is the key bit size of the corresponding private - key for this certificate. If `algorithm` is set to `RSA`, valid - values are `2048`, `4096` or `8192`, and will default to `2048` - if not specified. If `algorithm` is set to `ECDSA`, valid values - are `256`, `384` or `521`, and will default to `256` if not specified. - No other values are allowed. - type: integer - type: object - renewBefore: - description: The amount of time before the currently issued certificate's - `notAfter` time that cert-manager will begin to attempt to renew the - certificate. If this value is greater than the total duration of the - certificate (i.e. notAfter - notBefore), it will be automatically - renewed 2/3rds of the way through the certificate's duration. 
- type: string - secretName: - description: SecretName is the name of the secret resource that will - be automatically created and managed by this Certificate resource. - It will be populated with a private key and certificate, signed by - the denoted issuer. - type: string - subject: - description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). - properties: - countries: - description: Countries to be used on the Certificate. - items: - type: string - type: array - localities: - description: Cities to be used on the Certificate. - items: - type: string - type: array - organizationalUnits: - description: Organizational Units to be used on the Certificate. - items: - type: string - type: array - organizations: - description: Organizations to be used on the Certificate. - items: - type: string - type: array - postalCodes: - description: Postal codes to be used on the Certificate. - items: - type: string - type: array - provinces: - description: State/Provinces to be used on the Certificate. - items: - type: string - type: array - serialNumber: - description: Serial number to be used on the Certificate. - type: string - streetAddresses: - description: Street addresses to be used on the Certificate. - items: - type: string - type: array - type: object - uris: - description: URIs is a list of URI subjectAltNames to be set on the - Certificate. - items: - type: string - type: array - usages: - description: Usages is the set of x509 usages that are requested for - the certificate. Defaults to `digital signature` and `key encipherment` - if not specified. - items: - description: 'KeyUsage specifies valid usage contexts for keys. 
See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Valid KeyUsage values are as follows: "signing", "digital signature", - "content commitment", "key encipherment", "key agreement", "data - encipherment", "cert sign", "crl sign", "encipher only", "decipher - only", "any", "server auth", "client auth", "code signing", "email - protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec - user", "timestamping", "ocsp signing", "microsoft sgc", "netscape - sgc"' - enum: - - signing - - digital signature - - content commitment - - key encipherment - - key agreement - - data encipherment - - cert sign - - crl sign - - encipher only - - decipher only - - any - - server auth - - client auth - - code signing - - email protection - - s/mime - - ipsec end system - - ipsec tunnel - - ipsec user - - timestamping - - ocsp signing - - microsoft sgc - - netscape sgc - type: string - type: array - required: - - issuerRef - - secretName - type: object - status: - description: Status of the Certificate. This is set and managed automatically. - properties: - conditions: - description: List of status conditions to indicate the status of certificates. - Known condition types are `Ready` and `Issuing`. - items: - description: CertificateCondition contains condition information for - an Certificate. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). 
- enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready', - `Issuing`). - type: string - required: - - status - - type - type: object - type: array - lastFailureTime: - description: LastFailureTime is the time as recorded by the Certificate - controller of the most recent failure to complete a CertificateRequest - for this Certificate resource. If set, cert-manager will not re-request - another Certificate until 1 hour has elapsed from this time. - format: date-time - type: string - nextPrivateKeySecretName: - description: The name of the Secret resource containing the private - key to be used for the next certificate iteration. The keymanager - controller will automatically set this field if the `Issuing` condition - is set to `True`. It will automatically unset this field when the - Issuing condition is not set or False. - type: string - notAfter: - description: The expiration time of the certificate stored in the secret - named by this resource in `spec.secretName`. - format: date-time - type: string - notBefore: - description: The time after which the certificate stored in the secret - named by this resource in spec.secretName is valid. - format: date-time - type: string - renewalTime: - description: RenewalTime is the time at which the certificate will be - next renewed. If not set, no upcoming renewal is scheduled. - format: date-time - type: string - revision: - description: "The current 'revision' of the certificate as issued. \n - When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` - set to one greater than the current value of this field. \n Upon issuance, - this field will be set to the value of the annotation on the CertificateRequest - resource used to issue the certificate. 
\n Persisting the value on - the CertificateRequest resource allows the certificates controller - to know whether a request is part of an old issuance or if it is part - of the ongoing revision's issuance by checking if the revision value - in the annotation is greater than this field." - type: integer - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: challenges.acme.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.state - name: State - type: string - - JSONPath: .spec.dnsName - name: Domain - type: string - - JSONPath: .status.reason - name: Reason - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: acme.cert-manager.io - names: - kind: Challenge - listKind: ChallengeList - plural: challenges - singular: challenge - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Challenge is a type to represent a Challenge request with an ACME - server - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - authorizationURL: - description: The URL to the ACME Authorization resource that this challenge - is a part of. - type: string - dnsName: - description: dnsName is the identifier that this challenge is for, e.g. - example.com. If the requested DNSName is a 'wildcard', this field - MUST be set to the non-wildcard domain, e.g. for `*.example.com`, - it must be `example.com`. - type: string - issuerRef: - description: References a properly configured ACME-type Issuer which - should be used to create this Challenge. If the Issuer does not exist, - processing will be retried. If the Issuer is not an 'ACME' Issuer, - an error will be returned and the Challenge will be marked as failed. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - key: - description: 'The ACME challenge key for this challenge For HTTP01 challenges, - this is the value that must be responded with to complete the HTTP01 - challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded - SHA256 sum of the `.` text that must be set as the TXT record content.' 
- type: string - solver: - description: Contains the domain solving configuration that should be - used to solve this challenge resource. - properties: - dns01: - description: Configures cert-manager to attempt to complete authorizations - by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to manage - DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage DNS01 - challenge records. - properties: - clientID: - description: if both this and ClientSecret are left unset - MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset MSI - will be used - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret then - this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 challenge - records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that tells - cert-manager in which Cloud DNS zone the challenge record - has to be created. If left empty cert-manager will automatically - choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with Cloudflare. - Note: using an API token to authenticate is now the recommended - method as it allows greater control of permissions.' - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when using - API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 challenge - records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain Name - System") (https://datatracker.ietf.org/doc/rfc2136/) to manage - DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. If - the host is an IPv6 address it must be enclosed in square - brackets (e.g [2001:db8::1]) ; port is optional. This - field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the DNS supporting - RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` - are defined. Supported values are (case-insensitive): - ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or - ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. If - ``tsigSecretSecretRef`` is defined, this field is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the TSIG - value. If ``tsigKeyName`` is defined, this field is required. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared credentials - file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only this - zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName - api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 provider - will assume using either the explicit credentials AccessKeyID/SecretAccessKey - or the inferred credentials from environment variables, - shared credentials file or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared credentials - file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 challenge - solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should be passed - to the webhook apiserver when challenges are processed. - This can contain arbitrary JSON data. Secret values should - not be specified in this stanza. If secret values are - needed (e.g. credentials for a DNS service), you should - use a SecretKeySelector to reference a Secret resource. - For details on the schema of this field, consult the webhook - provider implementation's documentation. - groupName: - description: The API group name that should be used when - POSTing ChallengePayload resources to the webhook apiserver. - This should be the same as the GroupName specified in - the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined in - the webhook provider implementation. This will typically - be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete authorizations - by performing the HTTP01 challenge flow. It is not possible to - obtain certificates for wildcard domain names (e.g. `*.example.com`) - using the HTTP01 challenge mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver will - solve challenges by creating or modifying Ingress resources - in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating Ingress - resources to solve ACME challenges that use this challenge - solver. Only one of 'class' or 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress used - to solve HTTP01 challenges. Only the 'labels' and - 'annotations' fields may be set. If labels or annotations - overlap with in-built values, the values here will - override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added to - the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to the - created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that should - have ACME challenge solving routes inserted into it in - order to solve HTTP01 challenges. This is typically used - in conjunction with ingress controllers like ingress-gce, - which maintains a 1:1 mapping between external IPs and - ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure the - ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod used to - solve HTTP01 challenges. Only the 'labels' and 'annotations' - fields may be set. If labels or annotations overlap - with in-built values, the values here will override - the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added to - the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to the - created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the HTTP01 - challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' and - 'tolerations' fields are supported currently. All - other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - affinity expressions specified by this - field, but it may choose a node that violates - one or more of the expressions. The node - that is most preferred is the one with - the greatest sum of weights, i.e. 
for - each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a - sum by iterating through the elements - of this field and adding "weight" to the - sum if the node matches the corresponding - matchExpressions; the node(s) with the - highest sum are the most preferred. - items: - description: An empty preferred scheduling - term matches all objects with implicit - weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no - objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, - associated with the corresponding - weight. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. 
- properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with - matching the corresponding nodeSelectorTerm, - in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to an update), the system may or may - not try to eventually evict the pod from - its node. - properties: - nodeSelectorTerms: - description: Required. A list of node - selector terms. The terms are ORed. - items: - description: A null or empty node - selector term matches no objects. - The requirements of them are ANDed. - The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. 
- properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. co-locate this pod in the same - node, zone, etc. 
as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - affinity expressions specified by this - field, but it may choose a node that violates - one or more of the expressions. The node - that is most preferred is the one with - the greatest sum of weights, i.e. for - each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a - sum by iterating through the elements - of this field and adding "weight" to the - sum if the node has pods which matches - the corresponding podAffinityTerm; the - node(s) with the highest sum are the most - preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key and - values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to a - set of values. Valid - operators are In, - NotIn, Exists and - DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, the - values array must - be non-empty. If the - operator is Exists - or DoesNotExist, the - values array must - be empty. This array - is replaced during - a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in - the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be - co-located (affinity) or not - co-located (anti-affinity) with - the pods matching the labelSelector - in the specified namespaces, - where co-located is defined - as running on a node whose value - of the label with key topologyKey - matches that of any node on - which any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to a pod label update), the system - may or may not try to eventually evict - the pod from its node. When there are - multiple elements, the lists of nodes - corresponding to each podAffinityTerm - are intersected, i.e. all terms must be - satisfied. 
- items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) with, - where co-located is defined as running - on a node whose value of the label with - key matches that of any - node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an - array of string values. - If the operator is In - or NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the values - array must be empty. This - array is replaced during - a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a - map of {key,value} pairs. A - single {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator - is "In", and the values array - contains only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); null - or empty list means "this pod's - namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running - on a node whose value of the label - with key topologyKey matches that - of any node on which any of the - selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling - rules (e.g. avoid putting this pod in the - same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - anti-affinity expressions specified by - this field, but it may choose a node that - violates one or more of the expressions. - The node that is most preferred is the - one with the greatest sum of weights, - i.e. for each node that meets all of the - scheduling requirements (resource request, - requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating - through the elements of this field and - adding "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with the - highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. 
- properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key and - values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to a - set of values. Valid - operators are In, - NotIn, Exists and - DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, the - values array must - be non-empty. If the - operator is Exists - or DoesNotExist, the - values array must - be empty. This array - is replaced during - a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in - the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be - co-located (affinity) or not - co-located (anti-affinity) with - the pods matching the labelSelector - in the specified namespaces, - where co-located is defined - as running on a node whose value - of the label with key topologyKey - matches that of any node on - which any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the anti-affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to a pod label update), the system - may or may not try to eventually evict - the pod from its node. When there are - multiple elements, the lists of nodes - corresponding to each podAffinityTerm - are intersected, i.e. all terms must be - satisfied. - items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) with, - where co-located is defined as running - on a node whose value of the label with - key matches that of any - node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an - array of string values. - If the operator is In - or NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the values - array must be empty. This - array is replaced during - a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a - map of {key,value} pairs. A - single {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator - is "In", and the values array - contains only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); null - or empty list means "this pod's - namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running - on a node whose value of the label - with key topologyKey matches that - of any node on which any of the - selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which must - be true for the pod to fit on a node. Selector - which must match a node''s labels for the pod - to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached - to tolerates any taint that matches the triple - using the matching operator - . - properties: - effect: - description: Effect indicates the taint effect - to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, - PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the - toleration applies to. Empty means match - all taint keys. If the key is empty, operator - must be Exists; this combination means to - match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists - and Equal. Defaults to Equal. Exists is - equivalent to wildcard for value, so that - a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration (which - must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By - default, it is not set, which means tolerate - the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value the - toleration matches to. If the operator is - Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes solver - service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. If - not specified, the solver will be treated as the 'default' solver - with the lowest priority, i.e. if any other solver has a more - specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be used - to solve. If specified and a match is found, a dnsNames selector - will take precedence over a dnsZones selector. If multiple - solvers match with the same dnsNames value, the solver with - the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in - the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be used - to solve. The most specific DNS zone match specified here - will take precedence over other DNS zone matches, so a solver - specifying sys.example.com will be selected over one specifying - example.com for the domain www.sys.example.com. If multiple - solvers match with the same dnsZones value, the solver with - the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in - the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the set - of certificate's that this challenge solver will apply to. 
- type: object - type: object - type: object - token: - description: The ACME challenge token for this challenge. This is the - raw value returned from the ACME server. - type: string - type: - description: The type of ACME challenge this resource represents. One - of "HTTP-01" or "DNS-01". - enum: - - HTTP-01 - - DNS-01 - type: string - url: - description: The URL of the ACME Challenge resource for this challenge. - This can be used to lookup details about the status of this challenge. - type: string - wildcard: - description: wildcard will be true if this challenge is for a wildcard - identifier, for example '*.example.com'. - type: boolean - required: - - authorizationURL - - dnsName - - issuerRef - - key - - solver - - token - - type - - url - type: object - status: - properties: - presented: - description: presented will be set to true if the challenge values for - this challenge are currently 'presented'. This *does not* imply the - self check is passing. Only that the values have been 'submitted' - for the appropriate challenge mechanism (i.e. the DNS01 TXT record - has been presented, or the HTTP01 configuration has been configured). - type: boolean - processing: - description: Used to denote whether this challenge should be processed - or not. This field will only be set to true by the 'scheduling' component. - It will only be set to false by the 'challenges' controller, after - the challenge has reached a final state or timed out. If this field - is set to false, the challenge controller will not take any more action. - type: boolean - reason: - description: Contains human readable information on why the Challenge - is in the current state. - type: string - state: - description: Contains the current 'state' of the challenge. If not set, - the state of the challenge is unknown. 
- enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - type: object - required: - - metadata - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: clusterissuers.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: ClusterIssuer - listKind: ClusterIssuerList - plural: clusterissuers - singular: clusterissuer - scope: Cluster - subresources: - status: {} - validation: - openAPIV3Schema: - description: A ClusterIssuer represents a certificate issuing authority which - can be referenced as part of `issuerRef` fields. It is similar to an Issuer, - however it is cluster-scoped and therefore can be referenced by resources - that exist in *any* namespace, not just the same namespace as the referent. 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the ClusterIssuer resource. - properties: - acme: - description: ACME configures this issuer to communicate with a RFC8555 - (ACME) server to obtain signed x509 certificates. - properties: - disableAccountKeyGeneration: - description: Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account - but will expect the account key to be supplied via an existing - secret. If false, the cert-manager system will generate a new - ACME account key for the Issuer. Defaults to false. - type: boolean - email: - description: Email is the email address to be associated with the - ACME account. This field is optional, but it is strongly recommended - to be set. It will be used to contact you in case of issues with - your account or certificates, including expiry notification emails. - This field may be updated after the account is initially registered. - type: string - enableDurationFeature: - description: Enables requesting a Not After date on certificates - that matches the duration of the certificate. This is not supported - by all ACME servers like Let's Encrypt. If set to true when the - ACME server does not support it it will create an error on the - Order. 
Defaults to false. - type: boolean - externalAccountBinding: - description: ExternalAccountBinding is a reference to a CA external - account of the ACME server. If set, upon registration cert-manager - will attempt to associate the given external account credentials - with the registered ACME account. - properties: - keyAlgorithm: - description: keyAlgorithm is the MAC key algorithm that the - key is used for. Valid values are "HS256", "HS384" and "HS512". - enum: - - HS256 - - HS384 - - HS512 - type: string - keyID: - description: keyID is the ID of the CA key that the External - Account is bound to. - type: string - keySecretRef: - description: keySecretRef is a Secret Key Selector referencing - a data item in a Kubernetes Secret which holds the symmetric - MAC key of the External Account Binding. The `key` is the - index string that is paired with the key data in the Secret - and should not be confused with the key data itself, or indeed - with the External Account Binding keyID above. The secret - key stored in the Secret **must** be un-padded, base64 URL - encoded data. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - keyAlgorithm - - keyID - - keySecretRef - type: object - preferredChain: - description: 'PreferredChain is the chain to use if the ACME server - outputs multiple. PreferredChain is no guarantee that this one - gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s - DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" - for the newer Let''s Encrypt root CA. 
This value picks the first - certificate bundle in the ACME alternative chains that has a certificate - with this value as its issuer''s CN' - maxLength: 64 - type: string - privateKeySecretRef: - description: PrivateKey is the name of a Kubernetes Secret resource - that will be used to store the automatically generated ACME account - private key. Optionally, a `key` may be specified to select a - specific entry within the named Secret resource. If `key` is not - specified, a default of `tls.key` will be used. - properties: - key: - description: The key of the entry in the Secret resource's `data` - field to be used. Some instances of this field may be defaulted, - in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - server: - description: 'Server is the URL used to access the ACME server''s - ''directory'' endpoint. For example, for Let''s Encrypt''s staging - endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported.' - type: string - skipTLSVerify: - description: Enables or disables validation of the ACME server TLS - certificate. If true, requests to the ACME server will not have - their TLS certificate validated (i.e. insecure connections will - be allowed). Only enable this option in development environments. - The cert-manager system installed roots will be used to verify - connections to the ACME server if this is false. Defaults to false. - type: boolean - solvers: - description: 'Solvers is a list of challenge solvers that will be - used to solve ACME challenges for the matching domains. Solver - configurations must be provided in order to obtain certificates - from an ACME server. 
For more information, see: https://cert-manager.io/docs/configuration/acme/' - items: - description: Configures an issuer to solve challenges using the - specified options. Only one of HTTP01 or DNS01 may be provided. - properties: - dns01: - description: Configures cert-manager to attempt to complete - authorizations by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to - manage DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. 
- properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage - DNS01 challenge records. - properties: - clientID: - description: if both this and ClientSecret are left - unset MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset - MSI will be used - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret - then this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 - challenge records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that - tells cert-manager in which Cloud DNS zone the challenge - record has to be created. If left empty cert-manager - will automatically choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with - Cloudflare. Note: using an API token to authenticate - is now the recommended method as it allows greater - control of permissions.' - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. 
Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when - using API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 - challenge records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain - Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. - If the host is an IPv6 address it must be enclosed - in square brackets (e.g [2001:db8::1]) ; port is - optional. This field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the - DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` - and ``tsigKeyName`` are defined. Supported values - are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, - ``HMACSHA256`` or ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field - is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the - TSIG value. If ``tsigKeyName`` is defined, this - field is required. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only - this zone in Route53 and will not do an lookup using - the route53:ListHostedZonesByName api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 - provider will assume using either the explicit credentials - AccessKeyID/SecretAccessKey or the inferred credentials - from environment variables, shared credentials file - or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 - challenge solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should - be passed to the webhook apiserver when challenges - are processed. This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for - a DNS service), you should use a SecretKeySelector - to reference a Secret resource. For details on the - schema of this field, consult the webhook provider - implementation's documentation. - groupName: - description: The API group name that should be used - when POSTing ChallengePayload resources to the webhook - apiserver. This should be the same as the GroupName - specified in the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined - in the webhook provider implementation. This will - typically be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete - authorizations by performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain - names (e.g. `*.example.com`) using the HTTP01 challenge - mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver - will solve challenges by creating or modifying Ingress - resources in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating - Ingress resources to solve ACME challenges that - use this challenge solver. Only one of 'class' or - 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 - challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that - should have ACME challenge solving routes inserted - into it in order to solve HTTP01 challenges. This - is typically used in conjunction with ingress controllers - like ingress-gce, which maintains a 1:1 mapping - between external IPs and ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure - the ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the - HTTP01 challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' - and 'tolerations' fields are supported currently. - All other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. 
- for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node matches the - corresponding matchExpressions; - the node(s) with the highest sum - are the most preferred. - items: - description: An empty preferred - scheduling term matches all objects - with implicit weight 0 (i.e. it's - a no-op). A null preferred scheduling - term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector - term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated - with matching the corresponding - nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to an update), the system - may or may not try to eventually - evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list - of node selector terms. The - terms are ORed. - items: - description: A null or empty - node selector term matches - no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of - the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. 
co-locate this pod in the - same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node has pods - which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. 
- If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to a pod label update), - the system may or may not try to - eventually evict the pod from its - node. 
When there are multiple elements, - the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity - scheduling rules (e.g. avoid putting - this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the anti-affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - anti-affinity expressions, etc.), - compute a sum by iterating through - the elements of this field and adding - "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity - requirements specified by this field - are not met at scheduling time, - the pod will not be scheduled onto - the node. If the anti-affinity requirements - specified by this field cease to - be met at some point during pod - execution (e.g. due to a pod label - update), the system may or may not - try to eventually evict the pod - from its node. When there are multiple - elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which - must be true for the pod to fit on a node. - Selector which must match a node''s labels - for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service - account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is - attached to tolerates any taint that matches - the triple using the - matching operator . - properties: - effect: - description: Effect indicates the taint - effect to match. Empty means match - all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Key is the taint key that - the toleration applies to. Empty means - match all taint keys. If the key is - empty, operator must be Exists; this - combination means to match all values - and all keys. - type: string - operator: - description: Operator represents a key's - relationship to the value. Valid operators - are Exists and Equal. Defaults to - Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration - (which must be of effect NoExecute, - otherwise this field is ignored) tolerates - the taint. By default, it is not set, - which means tolerate the taint forever - (do not evict). Zero and negative - values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value - the toleration matches to. If the - operator is Exists, the value should - be empty, otherwise just a regular - string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes - solver service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' - solver with the lowest priority, i.e. if any other solver - has a more specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be - used to solve. If specified and a match is found, a - dnsNames selector will take precedence over a dnsZones - selector. If multiple solvers match with the same dnsNames - value, the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be - used to solve. The most specific DNS zone match specified - here will take precedence over other DNS zone matches, - so a solver specifying sys.example.com will be selected - over one specifying example.com for the domain www.sys.example.com. - If multiple solvers match with the same dnsZones value, - the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the - set of certificate's that this challenge solver will - apply to. 
- type: object - type: object - type: object - type: array - required: - - privateKeySecretRef - - server - type: object - ca: - description: CA configures this issuer to sign certificates using a - signing CA keypair stored in a Secret resource. This is used to build - internal PKIs that are managed by cert-manager. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set, - certificates will be issued without distribution points set. - items: - type: string - type: array - secretName: - description: SecretName is the name of the secret used to sign Certificates - issued by this Issuer. - type: string - required: - - secretName - type: object - selfSigned: - description: SelfSigned configures this issuer to 'self sign' certificates - using the private key used to create the CertificateRequest object. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set - certificate will be issued without CDP. Values are strings. - items: - type: string - type: array - type: object - vault: - description: Vault configures this issuer to sign certificates using - a HashiCorp Vault PKI backend. - properties: - auth: - description: Auth configures how cert-manager authenticates with - the Vault server. - properties: - appRole: - description: AppRole authenticates with Vault using the App - Role auth mechanism, with the role and secret stored in a - Kubernetes Secret resource. 
- properties: - path: - description: 'Path where the App Role authentication backend - is mounted in Vault, e.g: "approle"' - type: string - roleId: - description: RoleID configured in the App Role authentication - backend when setting up the authentication backend in - Vault. - type: string - secretRef: - description: Reference to a key in a Secret that contains - the App Role secret used to authenticate with Vault. The - `key` field must be specified and denotes which entry - within the Secret resource is used as the app role secret. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - path - - roleId - - secretRef - type: object - kubernetes: - description: Kubernetes authenticates with Vault by passing - the ServiceAccount token stored in the named Secret resource - to the Vault server. - properties: - mountPath: - description: The Vault mountPath here is the mount path - to use when authenticating with Vault. For example, setting - a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` - to authenticate with Vault. If unspecified, the default - value "/v1/auth/kubernetes" will be used. - type: string - role: - description: A required field containing the Vault Role - to assume. A Role binds a Kubernetes ServiceAccount with - a set of Vault policies. - type: string - secretRef: - description: The required Secret field containing a Kubernetes - ServiceAccount JWT used for authenticating with Vault. - Use of 'ambient credentials' is not supported. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - role - - secretRef - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by presenting - a token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - type: object - caBundle: - description: PEM encoded CA bundle used to validate Vault server - certificate. Only used if the Server URL is using HTTPS protocol. - This parameter is ignored for plain HTTP protocol connection. - If not set the system root certificates are used to validate the - TLS connection. - format: byte - type: string - namespace: - description: 'Name of the vault namespace. Namespaces is a set of - features within Vault Enterprise that allows Vault environments - to support Secure Multi-tenancy. e.g: "ns1" More about namespaces - can be found here https://www.vaultproject.io/docs/enterprise/namespaces' - type: string - path: - description: 'Path is the mount path of the Vault PKI backend''s - `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' - type: string - server: - description: 'Server is the connection address for the Vault server, - e.g: "https://vault.example.com:8200".' - type: string - required: - - auth - - path - - server - type: object - venafi: - description: Venafi configures this issuer to sign certificates using - a Venafi TPP or Venafi Cloud policy zone. 
- properties: - cloud: - description: Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. - properties: - apiTokenSecretRef: - description: APITokenSecretRef is a secret key selector for - the Venafi Cloud API token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: URL is the base URL for Venafi Cloud. Defaults - to "https://api.venafi.cloud/v1". - type: string - required: - - apiTokenSecretRef - type: object - tpp: - description: TPP specifies Trust Protection Platform configuration - settings. Only one of TPP or Cloud may be specified. - properties: - caBundle: - description: CABundle is a PEM encoded TLS certificate to use - to verify connections to the TPP instance. If specified, system - roots will not be used and the issuing CA for the TPP instance - must be verifiable using the provided root. If not specified, - the connection will be verified using the cert-manager system - root certificates. - format: byte - type: string - credentialsRef: - description: CredentialsRef is a reference to a Secret containing - the username and password for the TPP server. The secret must - contain two keys, 'username' and 'password'. - properties: - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: 'URL is the base URL for the vedsdk endpoint of - the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
- type: string - required: - - credentialsRef - - url - type: object - zone: - description: Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by - the named zone policy. This field is required. - type: string - required: - - zone - type: object - type: object - status: - description: Status of the ClusterIssuer. This is set and managed automatically. - properties: - acme: - description: ACME specific status options. This field should only be - set if the Issuer is configured to use an ACME server to issue certificates. - properties: - lastRegisteredEmail: - description: LastRegisteredEmail is the email associated with the - latest registered ACME account, in order to track changes made - to registered account associated with the Issuer - type: string - uri: - description: URI is the unique account identifier, which can also - be used to retrieve account details from the CA - type: string - type: object - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. - items: - description: IssuerCondition contains condition information for an - Issuer. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready'). 
- type: string - required: - - status - - type - type: object - type: array - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: issuers.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: Issuer - listKind: IssuerList - plural: issuers - singular: issuer - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: An Issuer represents a certificate issuing authority which can - be referenced as part of `issuerRef` fields. It is scoped to a single namespace - and can therefore only be referenced by resources within the same namespace. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the Issuer resource. - properties: - acme: - description: ACME configures this issuer to communicate with a RFC8555 - (ACME) server to obtain signed x509 certificates. - properties: - disableAccountKeyGeneration: - description: Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account - but will expect the account key to be supplied via an existing - secret. If false, the cert-manager system will generate a new - ACME account key for the Issuer. Defaults to false. - type: boolean - email: - description: Email is the email address to be associated with the - ACME account. This field is optional, but it is strongly recommended - to be set. It will be used to contact you in case of issues with - your account or certificates, including expiry notification emails. - This field may be updated after the account is initially registered. - type: string - enableDurationFeature: - description: Enables requesting a Not After date on certificates - that matches the duration of the certificate. This is not supported - by all ACME servers like Let's Encrypt. If set to true when the - ACME server does not support it it will create an error on the - Order. Defaults to false. 
- type: boolean - externalAccountBinding: - description: ExternalAccountBinding is a reference to a CA external - account of the ACME server. If set, upon registration cert-manager - will attempt to associate the given external account credentials - with the registered ACME account. - properties: - keyAlgorithm: - description: keyAlgorithm is the MAC key algorithm that the - key is used for. Valid values are "HS256", "HS384" and "HS512". - enum: - - HS256 - - HS384 - - HS512 - type: string - keyID: - description: keyID is the ID of the CA key that the External - Account is bound to. - type: string - keySecretRef: - description: keySecretRef is a Secret Key Selector referencing - a data item in a Kubernetes Secret which holds the symmetric - MAC key of the External Account Binding. The `key` is the - index string that is paired with the key data in the Secret - and should not be confused with the key data itself, or indeed - with the External Account Binding keyID above. The secret - key stored in the Secret **must** be un-padded, base64 URL - encoded data. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - keyAlgorithm - - keyID - - keySecretRef - type: object - preferredChain: - description: 'PreferredChain is the chain to use if the ACME server - outputs multiple. PreferredChain is no guarantee that this one - gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s - DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" - for the newer Let''s Encrypt root CA. 
This value picks the first - certificate bundle in the ACME alternative chains that has a certificate - with this value as its issuer''s CN' - maxLength: 64 - type: string - privateKeySecretRef: - description: PrivateKey is the name of a Kubernetes Secret resource - that will be used to store the automatically generated ACME account - private key. Optionally, a `key` may be specified to select a - specific entry within the named Secret resource. If `key` is not - specified, a default of `tls.key` will be used. - properties: - key: - description: The key of the entry in the Secret resource's `data` - field to be used. Some instances of this field may be defaulted, - in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - server: - description: 'Server is the URL used to access the ACME server''s - ''directory'' endpoint. For example, for Let''s Encrypt''s staging - endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported.' - type: string - skipTLSVerify: - description: Enables or disables validation of the ACME server TLS - certificate. If true, requests to the ACME server will not have - their TLS certificate validated (i.e. insecure connections will - be allowed). Only enable this option in development environments. - The cert-manager system installed roots will be used to verify - connections to the ACME server if this is false. Defaults to false. - type: boolean - solvers: - description: 'Solvers is a list of challenge solvers that will be - used to solve ACME challenges for the matching domains. Solver - configurations must be provided in order to obtain certificates - from an ACME server. 
For more information, see: https://cert-manager.io/docs/configuration/acme/' - items: - description: Configures an issuer to solve challenges using the - specified options. Only one of HTTP01 or DNS01 may be provided. - properties: - dns01: - description: Configures cert-manager to attempt to complete - authorizations by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to - manage DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. 
- properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage - DNS01 challenge records. - properties: - clientID: - description: if both this and ClientSecret are left - unset MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset - MSI will be used - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret - then this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 - challenge records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that - tells cert-manager in which Cloud DNS zone the challenge - record has to be created. If left empty cert-manager - will automatically choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with - Cloudflare. Note: using an API token to authenticate - is now the recommended method as it allows greater - control of permissions.' - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. 
Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when - using API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 - challenge records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain - Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. - If the host is an IPv6 address it must be enclosed - in square brackets (e.g [2001:db8::1]) ; port is - optional. This field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the - DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` - and ``tsigKeyName`` are defined. Supported values - are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, - ``HMACSHA256`` or ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field - is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the - TSIG value. If ``tsigKeyName`` is defined, this - field is required. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only - this zone in Route53 and will not do an lookup using - the route53:ListHostedZonesByName api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 - provider will assume using either the explicit credentials - AccessKeyID/SecretAccessKey or the inferred credentials - from environment variables, shared credentials file - or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 - challenge solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should - be passed to the webhook apiserver when challenges - are processed. This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for - a DNS service), you should use a SecretKeySelector - to reference a Secret resource. For details on the - schema of this field, consult the webhook provider - implementation's documentation. - groupName: - description: The API group name that should be used - when POSTing ChallengePayload resources to the webhook - apiserver. This should be the same as the GroupName - specified in the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined - in the webhook provider implementation. This will - typically be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete - authorizations by performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain - names (e.g. `*.example.com`) using the HTTP01 challenge - mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver - will solve challenges by creating or modifying Ingress - resources in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating - Ingress resources to solve ACME challenges that - use this challenge solver. Only one of 'class' or - 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 - challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that - should have ACME challenge solving routes inserted - into it in order to solve HTTP01 challenges. This - is typically used in conjunction with ingress controllers - like ingress-gce, which maintains a 1:1 mapping - between external IPs and ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure - the ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the - HTTP01 challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' - and 'tolerations' fields are supported currently. - All other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. 
- for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node matches the - corresponding matchExpressions; - the node(s) with the highest sum - are the most preferred. - items: - description: An empty preferred - scheduling term matches all objects - with implicit weight 0 (i.e. it's - a no-op). A null preferred scheduling - term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector - term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated - with matching the corresponding - nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to an update), the system - may or may not try to eventually - evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list - of node selector terms. The - terms are ORed. - items: - description: A null or empty - node selector term matches - no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of - the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. 
co-locate this pod in the - same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node has pods - which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. 
- If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to a pod label update), - the system may or may not try to - eventually evict the pod from its - node. 
When there are multiple elements, - the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity - scheduling rules (e.g. avoid putting - this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the anti-affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - anti-affinity expressions, etc.), - compute a sum by iterating through - the elements of this field and adding - "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity - requirements specified by this field - are not met at scheduling time, - the pod will not be scheduled onto - the node. If the anti-affinity requirements - specified by this field cease to - be met at some point during pod - execution (e.g. due to a pod label - update), the system may or may not - try to eventually evict the pod - from its node. When there are multiple - elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which - must be true for the pod to fit on a node. - Selector which must match a node''s labels - for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service - account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is - attached to tolerates any taint that matches - the triple using the - matching operator . - properties: - effect: - description: Effect indicates the taint - effect to match. Empty means match - all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Key is the taint key that - the toleration applies to. Empty means - match all taint keys. If the key is - empty, operator must be Exists; this - combination means to match all values - and all keys. - type: string - operator: - description: Operator represents a key's - relationship to the value. Valid operators - are Exists and Equal. Defaults to - Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration - (which must be of effect NoExecute, - otherwise this field is ignored) tolerates - the taint. By default, it is not set, - which means tolerate the taint forever - (do not evict). Zero and negative - values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value - the toleration matches to. If the - operator is Exists, the value should - be empty, otherwise just a regular - string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes - solver service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' - solver with the lowest priority, i.e. if any other solver - has a more specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be - used to solve. If specified and a match is found, a - dnsNames selector will take precedence over a dnsZones - selector. If multiple solvers match with the same dnsNames - value, the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be - used to solve. The most specific DNS zone match specified - here will take precedence over other DNS zone matches, - so a solver specifying sys.example.com will be selected - over one specifying example.com for the domain www.sys.example.com. - If multiple solvers match with the same dnsZones value, - the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the - set of certificate's that this challenge solver will - apply to. 
- type: object - type: object - type: object - type: array - required: - - privateKeySecretRef - - server - type: object - ca: - description: CA configures this issuer to sign certificates using a - signing CA keypair stored in a Secret resource. This is used to build - internal PKIs that are managed by cert-manager. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set, - certificates will be issued without distribution points set. - items: - type: string - type: array - secretName: - description: SecretName is the name of the secret used to sign Certificates - issued by this Issuer. - type: string - required: - - secretName - type: object - selfSigned: - description: SelfSigned configures this issuer to 'self sign' certificates - using the private key used to create the CertificateRequest object. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set - certificate will be issued without CDP. Values are strings. - items: - type: string - type: array - type: object - vault: - description: Vault configures this issuer to sign certificates using - a HashiCorp Vault PKI backend. - properties: - auth: - description: Auth configures how cert-manager authenticates with - the Vault server. - properties: - appRole: - description: AppRole authenticates with Vault using the App - Role auth mechanism, with the role and secret stored in a - Kubernetes Secret resource. 
- properties: - path: - description: 'Path where the App Role authentication backend - is mounted in Vault, e.g: "approle"' - type: string - roleId: - description: RoleID configured in the App Role authentication - backend when setting up the authentication backend in - Vault. - type: string - secretRef: - description: Reference to a key in a Secret that contains - the App Role secret used to authenticate with Vault. The - `key` field must be specified and denotes which entry - within the Secret resource is used as the app role secret. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - path - - roleId - - secretRef - type: object - kubernetes: - description: Kubernetes authenticates with Vault by passing - the ServiceAccount token stored in the named Secret resource - to the Vault server. - properties: - mountPath: - description: The Vault mountPath here is the mount path - to use when authenticating with Vault. For example, setting - a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` - to authenticate with Vault. If unspecified, the default - value "/v1/auth/kubernetes" will be used. - type: string - role: - description: A required field containing the Vault Role - to assume. A Role binds a Kubernetes ServiceAccount with - a set of Vault policies. - type: string - secretRef: - description: The required Secret field containing a Kubernetes - ServiceAccount JWT used for authenticating with Vault. - Use of 'ambient credentials' is not supported. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - role - - secretRef - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by presenting - a token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - type: object - caBundle: - description: PEM encoded CA bundle used to validate Vault server - certificate. Only used if the Server URL is using HTTPS protocol. - This parameter is ignored for plain HTTP protocol connection. - If not set the system root certificates are used to validate the - TLS connection. - format: byte - type: string - namespace: - description: 'Name of the vault namespace. Namespaces is a set of - features within Vault Enterprise that allows Vault environments - to support Secure Multi-tenancy. e.g: "ns1" More about namespaces - can be found here https://www.vaultproject.io/docs/enterprise/namespaces' - type: string - path: - description: 'Path is the mount path of the Vault PKI backend''s - `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' - type: string - server: - description: 'Server is the connection address for the Vault server, - e.g: "https://vault.example.com:8200".' - type: string - required: - - auth - - path - - server - type: object - venafi: - description: Venafi configures this issuer to sign certificates using - a Venafi TPP or Venafi Cloud policy zone. 
- properties: - cloud: - description: Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. - properties: - apiTokenSecretRef: - description: APITokenSecretRef is a secret key selector for - the Venafi Cloud API token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: URL is the base URL for Venafi Cloud. Defaults - to "https://api.venafi.cloud/v1". - type: string - required: - - apiTokenSecretRef - type: object - tpp: - description: TPP specifies Trust Protection Platform configuration - settings. Only one of TPP or Cloud may be specified. - properties: - caBundle: - description: CABundle is a PEM encoded TLS certificate to use - to verify connections to the TPP instance. If specified, system - roots will not be used and the issuing CA for the TPP instance - must be verifiable using the provided root. If not specified, - the connection will be verified using the cert-manager system - root certificates. - format: byte - type: string - credentialsRef: - description: CredentialsRef is a reference to a Secret containing - the username and password for the TPP server. The secret must - contain two keys, 'username' and 'password'. - properties: - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: 'URL is the base URL for the vedsdk endpoint of - the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
- type: string - required: - - credentialsRef - - url - type: object - zone: - description: Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by - the named zone policy. This field is required. - type: string - required: - - zone - type: object - type: object - status: - description: Status of the Issuer. This is set and managed automatically. - properties: - acme: - description: ACME specific status options. This field should only be - set if the Issuer is configured to use an ACME server to issue certificates. - properties: - lastRegisteredEmail: - description: LastRegisteredEmail is the email associated with the - latest registered ACME account, in order to track changes made - to registered account associated with the Issuer - type: string - uri: - description: URI is the unique account identifier, which can also - be used to retrieve account details from the CA - type: string - type: object - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. - items: - description: IssuerCondition contains condition information for an - Issuer. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready'). 
- type: string - required: - - status - - type - type: object - type: array - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: orders.acme.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.state - name: State - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.reason - name: Reason - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: acme.cert-manager.io - names: - kind: Order - listKind: OrderList - plural: orders - singular: order - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Order is a type to represent an Order with an ACME server - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - commonName: - description: CommonName is the common name as specified on the DER encoded - CSR. If specified, this value must also be present in `dnsNames` or - `ipAddresses`. This field must match the corresponding field on the - DER encoded CSR. - type: string - dnsNames: - description: DNSNames is a list of DNS names that should be included - as part of the Order validation process. This field must match the - corresponding field on the DER encoded CSR. - items: - type: string - type: array - duration: - description: Duration is the duration for the not after date for the - requested certificate. this is set on order creation as pe the ACME - spec. - type: string - ipAddresses: - description: IPAddresses is a list of IP addresses that should be included - as part of the Order validation process. This field must match the - corresponding field on the DER encoded CSR. - items: - type: string - type: array - issuerRef: - description: IssuerRef references a properly configured ACME-type Issuer - which should be used to create this Order. If the Issuer does not - exist, processing will be retried. If the Issuer is not an 'ACME' - Issuer, an error will be returned and the Order will be marked as - failed. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. 
- type: string - required: - - name - type: object - request: - description: Certificate signing request bytes in DER encoding. This - will be used when finalizing the order. This field must be set on - the order. - format: byte - type: string - required: - - issuerRef - - request - type: object - status: - properties: - authorizations: - description: Authorizations contains data returned from the ACME server - on what authorizations must be completed in order to validate the - DNS names specified on the Order. - items: - description: ACMEAuthorization contains data returned from the ACME - server on an authorization that must be completed in order validate - a DNS name on an ACME Order resource. - properties: - challenges: - description: Challenges specifies the challenge types offered - by the ACME server. One of these challenge types will be selected - when validating the DNS name and an appropriate Challenge resource - will be created to perform the ACME challenge process. - items: - description: Challenge specifies a challenge offered by the - ACME server for an Order. An appropriate Challenge resource - can be created to perform the ACME challenge process. - properties: - token: - description: Token is the token that must be presented for - this challenge. This is used to compute the 'key' that - must also be presented. - type: string - type: - description: Type is the type of challenge being offered, - e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the - raw value retrieved from the ACME server. Only 'http-01' - and 'dns-01' are supported by cert-manager, other values - will be ignored. - type: string - url: - description: URL is the URL of this challenge. It can be - used to retrieve additional metadata about the Challenge - from the ACME server. 
- type: string - required: - - token - - type - - url - type: object - type: array - identifier: - description: Identifier is the DNS name to be validated as part - of this authorization - type: string - initialState: - description: InitialState is the initial state of the ACME authorization - when first fetched from the ACME server. If an Authorization - is already 'valid', the Order controller will not create a Challenge - resource for the authorization. This will occur when working - with an ACME server that enables 'authz reuse' (such as Let's - Encrypt's production endpoint). If not set and 'identifier' - is set, the state is assumed to be pending and a Challenge will - be created. - enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - url: - description: URL is the URL of the Authorization that must be - completed - type: string - wildcard: - description: Wildcard will be true if this authorization is for - a wildcard DNS name. If this is true, the identifier will be - the *non-wildcard* version of the DNS name. For example, if - '*.example.com' is the DNS name being validated, this field - will be 'true' and the 'identifier' field will be 'example.com'. - type: boolean - required: - - url - type: object - type: array - certificate: - description: Certificate is a copy of the PEM encoded certificate for - this Order. This field will be populated after the order has been - successfully finalized with the ACME server, and the order has transitioned - to the 'valid' state. - format: byte - type: string - failureTime: - description: FailureTime stores the time that this order failed. This - is used to influence garbage collection and back-off. - format: date-time - type: string - finalizeURL: - description: FinalizeURL of the Order. This is used to obtain certificates - for this order once it has been completed. 
- type: string - reason: - description: Reason optionally provides more information about a why - the order is in the current state. - type: string - state: - description: State contains the current state of this Order resource. - States 'success' and 'expired' are 'final' - enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - url: - description: URL of the Order. This will initially be empty when the - resource is first created. The Order controller will populate this - field when the Order is first processed. This field will be immutable - after it is initially set. - type: string - type: object - required: - - metadata - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - -{{- end }} -{{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml b/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml index 778efb2a..80110d79 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml @@ -1,4 +1,3 @@ -{{- if (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} {{- if .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -22,6 +21,8 @@ spec: - cr - crs singular: certificaterequest + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -56,7 +57,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. 
\n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object properties: apiVersion: @@ -85,7 +86,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -162,14 +163,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. 
type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -198,7 +199,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object properties: apiVersion: @@ -227,7 +228,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. 
If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -304,14 +305,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -340,7 +341,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." 
+ description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object required: - spec @@ -367,7 +368,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -448,14 +449,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). 
+ description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -484,7 +485,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object required: - spec @@ -511,7 +512,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. 
The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -592,14 +593,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -636,6 +637,8 @@ spec: - cert - certs singular: certificate + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -719,7 +722,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. 
If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -734,19 +737,19 @@ spec: description: Name of the resource being referred to. type: string keyAlgorithm: - description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. type: string enum: - rsa - ecdsa keyEncoding: - description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then PKCS#1 will be used by default. + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. 
If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. type: string enum: - pkcs1 - pkcs8 keySize: - description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. type: integer keystores: description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. @@ -911,14 +914,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. 
@@ -1014,7 +1017,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1029,19 +1032,19 @@ spec: description: Name of the resource being referred to. type: string keyAlgorithm: - description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. type: string enum: - rsa - ecdsa keyEncoding: - description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. 
If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then PKCS#1 will be used by default. + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. type: string enum: - pkcs1 - pkcs8 keySize: - description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. type: integer keystores: description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. @@ -1055,7 +1058,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. @@ -1077,7 +1080,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. @@ -1206,14 +1209,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). 
type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1311,7 +1314,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1378,13 +1381,13 @@ spec: type: object properties: algorithm: - description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `algorithm` is specified and `size` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. 
If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. type: string enum: - RSA - ECDSA encoding: - description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. type: string enum: - PKCS1 @@ -1503,14 +1506,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1608,7 +1611,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. 
If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1634,7 +1637,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. @@ -1656,7 +1659,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. @@ -1675,13 +1678,13 @@ spec: type: object properties: algorithm: - description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `algorithm` is specified and `size` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. type: string enum: - RSA - ECDSA encoding: - description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. 
type: string enum: - PKCS1 @@ -1800,14 +1803,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1859,6 +1862,9 @@ spec: listKind: ChallengeList plural: challenges singular: challenge + categories: + - cert-manager + - cert-manager-acme scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -5200,6 +5206,8 @@ spec: listKind: ClusterIssuerList plural: clusterissuers singular: clusterissuer + categories: + - cert-manager scope: Cluster conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -6043,6 +6051,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
type: string @@ -6231,14 +6244,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -7072,6 +7085,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -7260,14 +7278,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -8103,6 +8121,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -8291,14 +8314,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -9134,6 +9157,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -9322,14 +9350,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). 
type: string served: true storage: true @@ -9359,6 +9387,8 @@ spec: listKind: IssuerList plural: issuers singular: issuer + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -10202,6 +10232,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -10390,14 +10425,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -11231,6 +11266,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
type: string @@ -11419,14 +11459,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -12262,6 +12302,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -12450,14 +12495,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -13293,6 +13338,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -13481,14 +13531,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: true @@ -13518,6 +13568,9 @@ spec: listKind: OrderList plural: orders singular: order + categories: + - cert-manager + - cert-manager-acme scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. 
@@ -14169,4 +14222,3 @@ status: storedVersions: [] --- {{- end }} -{{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml b/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml index 3daeaff2..39a69185 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml @@ -86,7 +86,18 @@ spec: {{- else }} - --cluster-resource-namespace=$(POD_NAMESPACE) {{- end }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- with .Values.global.leaderElection }} + - --leader-election-namespace={{ .namespace }} + {{- if .leaseDuration }} + - --leader-election-lease-duration={{ .leaseDuration }} + {{- end }} + {{- if .renewDeadline }} + - --leader-election-renew-deadline={{ .renewDeadline }} + {{- end }} + {{- if .retryPeriod }} + - --leader-election-retry-period={{ .retryPeriod }} + {{- end }} + {{- end }} {{- if .Values.extraArgs }} {{ toYaml .Values.extraArgs | indent 10 }} {{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml b/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml index c7244595..a9b85392 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml @@ -220,7 +220,7 @@ rules: - apiGroups: [""] resources: ["pods", "services"] verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses"] verbs: ["get", "list", "watch", "create", "delete", "update"] # We require the ability to specify a custom hostname when we are creating @@ -261,13 +261,13 @@ rules: - apiGroups: ["cert-manager.io"] resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses"] verbs: ["get", 
"list", "watch"] # We require these rules to support users with the OwnerReferencesPermissionEnforcement # admission controller enabled: # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses/finalizers"] verbs: ["update"] - apiGroups: [""] diff --git a/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml index 56ff07b7..99309c0e 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml @@ -1,10 +1,4 @@ -{{- $isV1AdmissionRegistration := false -}} -{{- if (or (not (.Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1")) (.Capabilities.APIVersions.Has "hacking-helm.i-wish-this-wasnt-required.cert-manager.io/force-v1beta1-webhooks") ) }} -apiVersion: admissionregistration.k8s.io/v1beta1 -{{- else }} -{{- $isV1AdmissionRegistration = true -}} apiVersion: admissionregistration.k8s.io/v1 -{{- end }} kind: MutatingWebhookConfiguration metadata: name: {{ include "webhook.fullname" . }} @@ -33,21 +27,12 @@ webhooks: - UPDATE resources: - "*/*" - {{- if $isV1AdmissionRegistration }} admissionReviewVersions: ["v1", "v1beta1"] timeoutSeconds: {{ .Values.webhook.timeoutSeconds }} - {{- end }} failurePolicy: Fail -{{- if (semverCompare ">=1.12-0" .Capabilities.KubeVersion.GitVersion) }} # Only include 'sideEffects' field in Kubernetes 1.12+ sideEffects: None -{{- end }} clientConfig: -{{- if (semverCompare "<=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Set caBundle to empty to avoid https://github.com/kubernetes/kubernetes/pull/70138 - # in Kubernetes 1.12 and below. - caBundle: "" -{{- end }} service: name: {{ template "webhook.fullname" . 
}} namespace: {{ .Release.Namespace | quote }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml b/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml index c45461e0..64c8d73d 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml @@ -1,10 +1,4 @@ -{{- $isV1AdmissionRegistration := false -}} -{{- if (or (not (.Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1")) (.Capabilities.APIVersions.Has "hacking-helm.i-wish-this-wasnt-required.cert-manager.io/force-v1beta1-webhooks") ) }} -apiVersion: admissionregistration.k8s.io/v1beta1 -{{- else }} -{{- $isV1AdmissionRegistration = true -}} apiVersion: admissionregistration.k8s.io/v1 -{{- end }} kind: ValidatingWebhookConfiguration metadata: name: {{ include "webhook.fullname" . }} @@ -43,21 +37,11 @@ webhooks: - UPDATE resources: - "*/*" - {{- if $isV1AdmissionRegistration }} admissionReviewVersions: ["v1", "v1beta1"] timeoutSeconds: {{ .Values.webhook.timeoutSeconds }} - {{- end }} failurePolicy: Fail -{{- if (semverCompare ">=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Only include 'sideEffects' field in Kubernetes 1.12+ sideEffects: None -{{- end }} clientConfig: -{{- if (semverCompare "<=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Set caBundle to empty to avoid https://github.com/kubernetes/kubernetes/pull/70138 - # in Kubernetes 1.12 and below. - caBundle: "" -{{- end }} service: name: {{ template "webhook.fullname" . 
}} namespace: {{ .Release.Namespace | quote }} diff --git a/lib/common/bootstrap/charts/cert-manager/values.yaml b/lib/common/bootstrap/charts/cert-manager/values.yaml index e7f0f09f..9f5bb85c 100644 --- a/lib/common/bootstrap/charts/cert-manager/values.yaml +++ b/lib/common/bootstrap/charts/cert-manager/values.yaml @@ -24,6 +24,21 @@ global: # Override the namespace used to store the ConfigMap for leader election namespace: "kube-system" + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. This must be less than or equal to the + # lease duration. + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. 
+ # retryPeriod: 15s + installCRDs: false replicaCount: 1 diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index c55f287e..c9b70c2a 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -1,6 +1,6 @@ charts: - name: cert-manager - version: v1.1.1 + version: v1.2.0 repo_name: jetstack - name: external-dns repo_name: bitnami diff --git a/src/cloud_provider/aws/kubernetes/helm_charts.rs b/src/cloud_provider/aws/kubernetes/helm_charts.rs index 59d919a5..89f98bfe 100644 --- a/src/cloud_provider/aws/kubernetes/helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/helm_charts.rs @@ -795,6 +795,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), "issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index f6db1401..3e9f733e 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -611,6 +611,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), "issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 8e252bff..5f8e1702 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -2,6 +2,9 @@ use crate::cloud_provider::helm::HelmAction::Deploy; use crate::cloud_provider::helm::HelmChartNamespaces::KubeSystem; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAppName, QoveryShellAgent}; 
use crate::cmd::helm::{to_command_error, Helm}; +use crate::cmd::helm_utils::{ + apply_chart_backup, delete_unused_chart_backup, prepare_chart_backup_on_upgrade, BackupStatus, +}; use crate::cmd::kubectl::{ kubectl_delete_crash_looping_pods, kubectl_exec_delete_crd, kubectl_exec_get_configmap, kubectl_exec_get_events, kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output, @@ -12,7 +15,7 @@ use crate::utilities::calculate_hash; use semver::Version; use std::collections::HashMap; use std::fmt::{Display, Formatter}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::{fs, thread}; use thread::spawn; use tracing::{span, Level}; @@ -82,6 +85,7 @@ pub struct ChartInfo { pub yaml_files_content: Vec, pub parse_stderr_for_error: bool, pub k8s_selector: Option, + pub backup_resources: Option>, } impl ChartInfo { @@ -146,6 +150,7 @@ impl Default for ChartInfo { yaml_files_content: vec![], parse_stderr_for_error: true, k8s_selector: None, + backup_resources: None, } } } @@ -232,7 +237,61 @@ pub trait HelmChart: Send { ); } - helm.upgrade(chart_info, &[]).map_err(to_command_error)?; + let installed_version = match helm.get_chart_version( + chart_info.name.clone(), + Some(chart_info.get_namespace_string().as_str()), + environment_variables.as_slice(), + ) { + Ok(version) => version, + Err(e) => { + warn!("error while trying to get installed version: {:?}", e); + None + } + }; + + let upgrade_status = match prepare_chart_backup_on_upgrade( + kubernetes_config, + chart_info.clone(), + environment_variables.as_slice(), + installed_version, + ) { + Ok(status) => status, + Err(e) => { + warn!("error while trying to prepare backup: {:?}", e); + BackupStatus { + is_backupable: false, + backup_path: PathBuf::new(), + } + } + }; + + match helm.upgrade(chart_info, &[]).map_err(to_command_error) { + Ok(_) => { + if upgrade_status.is_backupable { + if let Err(e) = apply_chart_backup( + kubernetes_config, + upgrade_status.backup_path.as_path(), + 
environment_variables.as_slice(), + chart_info, + ) { + warn!("error while trying to apply backup: {:?}", e); + }; + } + } + Err(e) => { + if upgrade_status.is_backupable { + if let Err(e) = delete_unused_chart_backup( + kubernetes_config, + environment_variables.as_slice(), + chart_info, + ) { + warn!("error while trying to delete backup: {:?}", e); + } + } + + return Err(e); + } + } } HelmAction::Destroy => { let chart_info = self.get_chart_info(); @@ -358,7 +417,7 @@ pub fn deploy_charts_levels( // Common charts // -#[derive(Default)] +#[derive(Default, Clone)] pub struct CommonChart { pub chart_info: ChartInfo, } diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 75682e0a..c35e17b4 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -560,6 +560,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), "issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index bb0aca91..b63e5486 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -504,40 +504,38 @@ impl Helm { }, ); - match helm_ret { - // Ok is ok - Ok(_) => Ok(()), - Err(err) => { - error!("Helm error: {:?}", err); + if let Err(err) = helm_ret { + error!("Helm error: {:?}", err); - // Try do define/specify a bit more the message - let stderr_msg: String = error_message.into_iter().collect(); - let stderr_msg = format!( - "{}: {}", - stderr_msg, - err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) - ); - let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { - HelmError::ReleaseLocked(chart.name.clone()) - } else if 
stderr_msg.contains("has been rolled back") { - HelmError::Rollbacked(chart.name.clone(), UPGRADE) - } else if stderr_msg.contains("timed out waiting") { - HelmError::Timeout(chart.name.clone(), UPGRADE, stderr_msg) - } else { - CmdError( - chart.name.clone(), - HelmCommand::UPGRADE, - CommandError::new( - "Helm error".to_string(), - Some(stderr_msg), - Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), - ), - ) - }; + // Try do define/specify a bit more the message + let stderr_msg: String = error_message.into_iter().collect(); + let stderr_msg = format!( + "{}: {}", + stderr_msg, + err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) + ); + let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { + HelmError::ReleaseLocked(chart.name.clone()) + } else if stderr_msg.contains("has been rolled back") { + HelmError::Rollbacked(chart.name.clone(), UPGRADE) + } else if stderr_msg.contains("timed out waiting") { + HelmError::Timeout(chart.name.clone(), UPGRADE, stderr_msg) + } else { + CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + "Helm error".to_string(), + Some(stderr_msg), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ), + ) + }; - Err(error) - } - } + return Err(error); + }; + + Ok(()) } pub fn uninstall_chart_if_breaking_version( @@ -612,17 +610,19 @@ mod tests { struct HelmTestCtx { helm: Helm, - chart: ChartInfo, + charts: Vec, } impl HelmTestCtx { fn cleanup(&self) { - let ret = self.helm.uninstall(&self.chart, &[]); - assert!(ret.is_ok()) + for chart in &self.charts { + let ret = self.helm.uninstall(chart, &vec![]); + assert!(ret.is_ok()) + } } fn new(release_name: &str) -> HelmTestCtx { - let chart = ChartInfo::new_from_custom_namespace( + let charts = vec![ChartInfo::new_from_custom_namespace( release_name.to_string(), "tests/helm/simple_nginx".to_string(), "default".to_string(), @@ -630,12 +630,12 @@ mod tests { vec![], 
false, None, - ); + )]; let mut kube_config = dirs::home_dir().unwrap(); kube_config.push(".kube/config"); - let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); + let helm = Helm::new(kube_config.to_str().unwrap(), &vec![]).unwrap(); - let cleanup = HelmTestCtx { helm, chart }; + let cleanup = HelmTestCtx { helm, charts }; cleanup.cleanup(); cleanup } @@ -656,26 +656,26 @@ mod tests { #[test] fn test_release_exist() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-release-exist"); - let ret = helm.check_release_exist(chart, &[]); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-release-exist"); + let ret = helm.check_release_exist(&charts[0], &vec![]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)) + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)) } #[test] fn test_list_release() { let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-list-release"); - chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); + charts[0].custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); // no existing namespace should return an empty array let ret = helm.list_release(Some("tsdfsfsdf"), &[]); assert!(matches!(ret, Ok(vec) if vec.is_empty())); // install something - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // We should have at least one release in all the release @@ -683,69 +683,69 @@ mod tests { assert!(matches!(ret, Ok(vec) if !vec.is_empty())); // We should have at least one release in all the release - let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); + let ret = helm.list_release(Some(&charts[0].get_namespace_string()), &vec![]); assert!(matches!(ret, Ok(vec) if vec.len() == 1)); // Install a second stuff let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = 
HelmTestCtx::new("test-list-release-2"); - chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); - let ret = helm.upgrade(chart, &[]); + charts[0].custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); - let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); + let ret = helm.list_release(Some(&charts[0].get_namespace_string()), &vec![]); assert!(matches!(ret, Ok(vec) if vec.len() == 2)); } #[test] fn test_upgrade_diff() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-diff"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade-diff"); - let ret = helm.upgrade_diff(chart, &[]); + let ret = helm.upgrade_diff(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); } #[test] fn test_rollback() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-rollback"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-rollback"); // check release does not exist yet - let ret = helm.rollback(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.rollback(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // First revision cannot be rollback - let ret = helm.rollback(chart, &[]); + let ret = helm.rollback(&charts[0], &vec![]); assert!(matches!(ret, Err(HelmError::CannotRollback(_)))); // 2nd upgrade - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Rollback should be ok now - let ret = helm.rollback(chart, &[]); + let ret = helm.rollback(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); } #[test] fn test_upgrade() { - let HelmTestCtx { 
ref helm, ref chart } = HelmTestCtx::new("test-upgrade"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(_))); } @@ -753,37 +753,37 @@ mod tests { fn test_upgrade_timeout() { let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-upgrade-timeout"); - chart.timeout_in_seconds = 1; + charts[0].timeout_in_seconds = 1; // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(HelmError::Timeout(_, _, _)))); // Release should not exist if it fails - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); } #[test] fn test_upgrade_with_lock_during_install() { // We want to check that we manage to install a chart even if a lock is present while it was the first installation - 
let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-with-lock-install"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade-with-lock-install"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // Spawn our task killer let barrier = Arc::new(Barrier::new(2)); std::thread::spawn({ let barrier = barrier.clone(); - let chart_name = chart.name.clone(); + let chart_name = charts[0].name.clone(); move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); @@ -794,19 +794,19 @@ mod tests { // install it barrier.wait(); - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if release.is_locked())); // New installation should work even if a lock is present - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if !release.is_locked())); } @@ -815,22 +815,22 @@ mod tests { // We want to check that we manage to install a chart even if a lock is present while it not the first installation let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-upgrade-with-lock-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == 
chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // First install - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Spawn our task killer let barrier = Arc::new(Barrier::new(2)); std::thread::spawn({ let barrier = barrier.clone(); - let chart_name = chart.name.clone(); + let chart_name = charts[0].name.clone(); move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); @@ -839,64 +839,64 @@ mod tests { } }); - chart.values = vec![ChartSetValue { + charts[0].values = vec![ChartSetValue { key: "initialDelaySeconds".to_string(), value: "6".to_string(), }]; barrier.wait(); - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if release.is_locked() && release.version == 2)); // New installation should work even if a lock is present - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if !release.is_locked() && release.version == 4)); } #[test] fn test_uninstall() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-uninstall"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-uninstall"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, 
Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // deleting something that does not exist should not be an issue - let ret = helm.uninstall(chart, &[]); + let ret = helm.uninstall(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(_))); // Delete it - let ret = helm.uninstall(chart, &[]); + let ret = helm.uninstall(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check release does not exist anymore - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); } #[test] fn test_getting_version() { let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-version-release"); - let _ = helm.upgrade(chart, &[]); - let releases = helm.list_release(Some(&chart.get_namespace_string()), &[]).unwrap(); + let _ = helm.upgrade(&charts[0], &[]); + let releases = helm.list_release(Some(&charts[0].get_namespace_string()), &[]).unwrap(); assert_eq!(releases[0].clone().version.unwrap(), Version::new(0, 1, 0)) } } diff --git a/src/cmd/helm_utils.rs b/src/cmd/helm_utils.rs new file mode 100644 index 00000000..2d894b0f --- /dev/null +++ b/src/cmd/helm_utils.rs @@ -0,0 +1,411 @@ +use crate::cloud_provider::helm::ChartInfo; +use crate::cmd::helm::HelmError::CmdError; +use crate::cmd::helm::{HelmCommand, HelmError}; +use crate::cmd::kubectl::{ + kubectl_apply_with_path, kubectl_create_secret_from_file, kubectl_delete_secret, kubectl_exec_get_secrets, + kubectl_get_resource_yaml, +}; +use 
crate::errors::CommandError; +use crate::fs::{ + create_yaml_backup_file, create_yaml_file_from_secret, indent_file, remove_lines_starting_with, + truncate_file_from_word, +}; +use semver::Version; +use serde_derive::Deserialize; +use std::fs::OpenOptions; +use std::io::{BufReader, Read}; +use std::path::{Path, PathBuf}; + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct Backup { + pub name: String, + pub content: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct BackupInfos { + pub name: String, + pub path: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ChartYAML { + #[serde(default)] + pub version: String, + #[serde(default)] + pub app_version: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct BackupStatus { + pub is_backupable: bool, + pub backup_path: PathBuf, +} + +pub fn prepare_chart_backup( + kubernetes_config: P, + workspace_root_dir: T, + chart: &ChartInfo, + envs: &[(&str, &str)], + backup_resources: Vec, +) -> Result, HelmError> +where + P: AsRef, + T: AsRef, +{ + let mut backups: Vec = vec![]; + for backup_resource in backup_resources { + match kubectl_get_resource_yaml( + &kubernetes_config, + envs.to_vec(), + backup_resource.as_str(), + Some(chart.get_namespace_string().as_str()), + ) { + Ok(content) => { + if !content.to_lowercase().contains("no resources found") { + backups.push(Backup { + name: backup_resource, + content, + }); + }; + } + Err(e) => { + error!("Kubectl error: {:?}", e.message_safe()) + } + }; + } + + let mut backup_infos: Vec = vec![]; + + if backups.is_empty() { + return Ok(backup_infos); + } + + for backup in backups.clone() { + if !backup.content.is_empty() && !backup.content.contains("items: []") { + match create_yaml_backup_file( + workspace_root_dir.as_ref(), + chart.name.to_string(), + Some(backup.name.clone()), + backup.content, + ) { + Ok(path) => { + backup_infos.push(BackupInfos { + name: backup.name, + 
path, + }); + } + Err(e) => { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while creating YAML backup file for {}.", backup.name), + Some(e.to_string()), + None, + ), + )) + } + } + } + } + + for backup_info in backup_infos.clone() { + if let Err(e) = remove_lines_starting_with( + backup_info.path.clone(), + vec!["resourceVersion", "uid", "apiVersion: v1", "items", "kind: List"], + ) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + if let Err(e) = truncate_file_from_word(backup_info.path.clone(), "metadata") { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + if let Err(e) = indent_file(backup_info.path.clone()) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + let backup_name = format!("{}-{}-q-backup", chart.name, backup_info.name); + if let Err(e) = kubectl_create_secret_from_file( + &kubernetes_config, + envs.to_vec(), + Some(chart.namespace.to_string().as_str()), + backup_name, + backup_info.name, + backup_info.path, + ) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + } + } + + Ok(backup_infos) +} + +pub fn apply_chart_backup

( + kubernetes_config: P, + workspace_root_dir: P, + envs: &[(&str, &str)], + chart: &ChartInfo, +) -> Result<(), HelmError> +where + P: AsRef, +{ + let secrets = kubectl_exec_get_secrets( + &kubernetes_config, + chart.clone().namespace.to_string().as_str(), + "", + envs.to_vec(), + ) + .map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + ) + })? + .items; + + for secret in secrets { + if secret.metadata.name.contains("-q-backup") { + let path = match create_yaml_file_from_secret(&workspace_root_dir, secret.clone()) { + Ok(path) => path, + Err(e) => match e.message_safe().to_lowercase().contains("no content") { + true => match kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + Ok(_) => continue, + Err(e) => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )) + } + }, + false => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )) + } + }, + }; + + if let Err(e) = kubectl_apply_with_path(&kubernetes_config, envs.to_vec(), path.as_str()) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + + if let Err(e) = kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + } + } + + Ok(()) +} + +pub fn delete_unused_chart_backup

( + kubernetes_config: P, + envs: &[(&str, &str)], + chart: &ChartInfo, +) -> Result<(), HelmError> +where + P: AsRef, +{ + let secrets = kubectl_exec_get_secrets( + &kubernetes_config, + chart.clone().namespace.to_string().as_str(), + "", + envs.to_vec(), + ) + .map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + ) + })? + .items; + + for secret in secrets { + if secret.metadata.name.contains("-q-backup") { + if let Err(e) = kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + } + } + + Ok(()) +} + +pub fn get_common_helm_chart_infos(chart: &ChartInfo) -> Result { + let string_path = format!("{}/Chart.yaml", chart.path); + let file = OpenOptions::new().read(true).open(string_path.as_str()).map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart infos for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + ) + })?; + let mut content = String::new(); + let _ = BufReader::new(file).read_to_string(&mut content); + match serde_yaml::from_str::(content.as_str()) { + Ok(chart_yaml) => Ok(chart_yaml), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart infos for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + } +} + +pub fn get_common_helm_chart_version(chart: &ChartInfo) -> Result, HelmError> { + let chart_yaml = match get_common_helm_chart_infos(chart) { + Ok(chart_yaml) => chart_yaml, + Err(e) => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + 
None, + ), + )) + } + }; + + if !chart_yaml.app_version.is_empty() { + return match Version::parse(chart_yaml.app_version.as_str()) { + Ok(version) => Ok(Some(version)), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + }; + } + + if !chart_yaml.version.is_empty() { + return match Version::parse(chart_yaml.version.as_str()) { + Ok(version) => Ok(Some(version)), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + }; + } + + Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new_from_safe_message(format!("Unable to get chart version for {}.", chart.name.clone())), + )) +} + +pub fn prepare_chart_backup_on_upgrade

( + kubernetes_config: P, + chart: ChartInfo, + envs: &[(&str, &str)], + installed_version: Option, +) -> Result +where + P: AsRef, +{ + let mut need_backup = false; + let root_dir_path = std::env::temp_dir(); + + if chart.backup_resources.is_some() { + if installed_version.le(&get_common_helm_chart_version(&chart)?) { + if let Err(e) = prepare_chart_backup( + kubernetes_config, + root_dir_path.as_path(), + &chart, + envs, + chart.backup_resources.as_ref().unwrap().to_vec(), + ) { + return Err(e); + }; + + need_backup = true; + } + } + + Ok(BackupStatus { + is_backupable: need_backup, + backup_path: root_dir_path, + }) +} diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 339a8261..8f9a8363 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -1,3 +1,6 @@ +use std::fmt::Debug; +use std::fs::File; +use std::io::Read; use std::path::Path; use retry::delay::Fibonacci; @@ -1204,6 +1207,30 @@ where Ok(result) } +fn kubectl_exec_raw_output

( + args: Vec<&str>, + kubernetes_config: P, + envs: Vec<(&str, &str)>, + keep_format: bool, +) -> Result +where + P: AsRef, +{ + let mut _envs = Vec::with_capacity(envs.len() + 1); + _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); + _envs.extend(envs); + + let mut output_vec: Vec = Vec::with_capacity(50); + let _ = kubectl_exec_with_output(args.clone(), _envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + error!("{}", line) + })?; + + match keep_format { + true => Ok(output_vec.join("\n")), + false => Ok(output_vec.join("")), + } +} + pub fn kubernetes_get_all_pdbs

( kubernetes_config: P, envs: Vec<(&str, &str)>, @@ -1245,3 +1272,98 @@ where kubectl_exec::(cmd_args, kubernetes_config, envs) } + +pub fn kubectl_get_resource_yaml

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + resource: &str, + namespace: Option<&str>, +) -> Result +where + P: AsRef, +{ + let mut cmd_args = vec!["get", resource, "-oyaml"]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, true) +} + +pub fn kubectl_apply_with_path

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + file_path: &str, +) -> Result +where + P: AsRef, +{ + kubectl_exec_raw_output::

(vec!["apply", "-f", file_path], kubernetes_config, envs, false) +} + +pub fn kubectl_create_secret

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + secret_name: String, + key: String, + value: String, +) -> Result +where + P: AsRef, +{ + let secret_arg = format!("--from-literal={}=\"{}\"", key, value); + let mut cmd_args = vec!["create", "secret", "generic", secret_name.as_str(), secret_arg.as_str()]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, false) +} + +pub fn kubectl_delete_secret

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + secret_name: String, +) -> Result +where + P: AsRef, +{ + let mut cmd_args = vec!["delete", "secret", secret_name.as_str()]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, false) +} + +pub fn kubectl_create_secret_from_file

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + backup_name: String, + key: String, + file_path: String, +) -> Result +where + P: AsRef, +{ + let mut file = File::open(file_path.as_str()).unwrap(); + let mut content = String::new(); + let _ = file.read_to_string(&mut content); + + kubectl_create_secret(kubernetes_config, envs, namespace, backup_name, key, content) +} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index 153aab46..d7ff53ec 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -1,6 +1,7 @@ pub mod command; pub mod docker; pub mod helm; +pub mod helm_utils; pub mod kubectl; pub mod structs; pub mod terraform; diff --git a/src/cmd/structs.rs b/src/cmd/structs.rs index 238024eb..0e6135a8 100644 --- a/src/cmd/structs.rs +++ b/src/cmd/structs.rs @@ -42,6 +42,7 @@ pub struct SecretItem { pub api_version: String, pub kind: String, pub metadata: SecretMetadata, + pub data: HashMap, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] diff --git a/src/fs.rs b/src/fs.rs index 7a68a48f..e35deca8 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -1,11 +1,16 @@ use std::collections::HashSet; use std::fs; -use std::fs::{create_dir_all, File}; -use std::io::{Error, ErrorKind}; +use std::fs::{create_dir_all, File, OpenOptions}; +use std::io::{BufRead, BufReader, Error, ErrorKind, Write}; use std::path::Path; +use crate::cmd::structs::SecretItem; +use crate::errors::CommandError; +use base64::decode; use flate2::write::GzEncoder; use flate2::Compression; +use itertools::Itertools; +use serde::__private::from_utf8_lossy; use std::ffi::OsStr; use walkdir::WalkDir; @@ -151,6 +156,232 @@ pub fn create_workspace_archive(working_root_dir: &str, execution_id: &str) -> R } } +pub fn create_yaml_backup_file

( + working_root_dir: P, + chart_name: String, + resource_name: Option, + content: String, +) -> Result +where + P: AsRef, +{ + let dir = working_root_dir.as_ref().join("backups"); + + if let Err(e) = create_dir_all(&dir) { + return Err(CommandError::new( + "Unable to create root dir path.".to_string(), + Some(e.to_string()), + None, + )); + } + + let root_path = dir + .to_str() + .map(|e| e.to_string()) + .ok_or_else(|| CommandError::new_from_safe_message("Unable to get backups root dir path.".to_string())); + + let string_path = match resource_name.is_some() { + true => format!( + "{}/{}-{}-q-backup.yaml", + root_path?, + chart_name, + resource_name.as_ref().unwrap() + ), + false => format!("{}/{}.yaml", root_path?, chart_name), + }; + let str_path = string_path.as_str(); + let path = Path::new(str_path); + + let mut file = match File::create(&path) { + Err(e) => { + return Err(CommandError::new( + format!("Unable to create YAML backup file for chart {}.", chart_name), + Some(e.to_string()), + None, + )) + } + Ok(file) => file, + }; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file for chart {}.", chart_name), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path.to_str().map(|e| e.to_string()).ok_or_else(|| { + CommandError::new_from_safe_message(format!( + "Unable to get YAML backup file path for chart {}.", + chart_name + )) + })?), + } +} + +pub fn remove_lines_starting_with(path: String, starters: Vec<&str>) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let mut content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + for starter in starters { + content = content + .into_iter() + .filter(|line| !line.contains(starter)) + .collect::>() + } + + let mut file = OpenOptions::new() + 
.write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.join("\n").as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn truncate_file_from_word(path: String, truncate_from: &str) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let content_vec = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + let truncate_from_index = match content_vec.iter().rposition(|line| line.contains(truncate_from)) { + None => content_vec.len(), + Some(index) => index, + }; + + let content = Vec::from(&content_vec[..truncate_from_index]).join("\n"); + + let mut file = OpenOptions::new() + .write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn indent_file(path: String) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let file_content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + let content = file_content.iter().map(|line| line[2..].to_string()).join("\n"); + + let mut file = OpenOptions::new() + .write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + 
CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn list_yaml_backup_files

(working_root_dir: P) -> Result, CommandError> +where + P: AsRef, +{ + let files = WalkDir::new(working_root_dir) + .follow_links(true) + .into_iter() + .filter_map(|e| e.ok()); + let mut backup_paths: Vec = vec![]; + for file in files { + if file + .file_name() + .to_str() + .ok_or_else(|| { + CommandError::new_from_safe_message(format!("Unable to get YAML backup file name {:?}.", file)) + })? + .to_string() + .contains("-q-backup.yaml") + { + backup_paths.push( + file.path() + .to_str() + .ok_or_else(|| { + CommandError::new_from_safe_message(format!("Unable to get YAML backup file name {:?}.", file)) + })? + .to_string(), + ) + } + } + + if backup_paths.is_empty() { + return Err(CommandError::new_from_safe_message( + "Unable to get YAML backup files".to_string(), + )); + } + + Ok(backup_paths) +} + +pub fn create_yaml_file_from_secret

(working_root_dir: P, secret: SecretItem) -> Result +where + P: AsRef, +{ + let message = format!("Unable to decode secret {}", secret.metadata.name); + let secret_data = secret.data.values().next(); + let secret_content = match secret_data.is_some() { + true => secret_data.unwrap().to_string(), + false => return Err(CommandError::new_from_safe_message(message)), + }; + + let content = match decode(secret_content) { + Ok(bytes) => from_utf8_lossy(&bytes[1..bytes.len() - 1]).to_string(), + Err(e) => return Err(CommandError::new(message, Some(e.to_string()), None)), + }; + match create_yaml_backup_file(working_root_dir.as_ref(), secret.metadata.name.clone(), None, content) { + Ok(path) => Ok(path), + Err(e) => Err(CommandError::new( + format!("Unable to create backup file from secret {}", secret.metadata.name), + Some(e.to_string()), + None, + )), + } +} + #[cfg(test)] mod tests { extern crate tempdir; @@ -159,7 +390,6 @@ mod tests { use flate2::read::GzDecoder; use std::collections::HashSet; use std::fs::File; - use std::io::prelude::*; use std::io::BufReader; use tempdir::TempDir; @@ -252,4 +482,102 @@ mod tests { tmp_files.into_iter().for_each(drop); tmp_dir.close().expect("error closing temporary directory"); } + + #[test] + fn test_backup_cleaning() { + let content = r#" + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + annotations: + meta.helm.sh/release-name: cert-manager-configs + meta.helm.sh/release-namespace: cert-manager + creationTimestamp: "2021-11-04T10:26:27Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: Helm + name: qovery + namespace: qovery + resourceVersion: "28347460" + uid: 509aad5f-db2d-44c3-b03b-beaf144118e2 + spec: + dnsNames: + - 'qovery' + issuerRef: + kind: ClusterIssuer + name: qovery + secretName: qovery + status: + conditions: + - lastTransitionTime: "2021-11-30T15:33:03Z" + message: Certificate is up to date and has not expired + reason: Ready + status: "True" + type: Ready + notAfter: 
"2022-04-29T13:34:51Z" + notBefore: "2022-01-29T13:34:52Z" + renewalTime: "2022-03-30T13:34:51Z" + revision: 3 + "#; + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let mut file_path = create_yaml_backup_file( + tmp_dir.path().to_str().unwrap(), + "test".to_string(), + Some("test".to_string()), + content.to_string(), + ) + .expect("No such file"); + file_path = remove_lines_starting_with(file_path, vec!["resourceVersion", "uid"]).unwrap(); + + let file = OpenOptions::new() + .read(true) + .write(true) + .open(file_path) + .expect("file doesn't exist"); + + let result = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>() + .join("\n"); + + let new_content = r#" + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + annotations: + meta.helm.sh/release-name: cert-manager-configs + meta.helm.sh/release-namespace: cert-manager + creationTimestamp: "2021-11-04T10:26:27Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: Helm + name: qovery + namespace: qovery + spec: + dnsNames: + - 'qovery' + issuerRef: + kind: ClusterIssuer + name: qovery + secretName: qovery + status: + conditions: + - lastTransitionTime: "2021-11-30T15:33:03Z" + message: Certificate is up to date and has not expired + reason: Ready + status: "True" + type: Ready + notAfter: "2022-04-29T13:34:51Z" + notBefore: "2022-01-29T13:34:52Z" + renewalTime: "2022-03-30T13:34:51Z" + revision: 3 + "# + .to_string(); + + assert_eq!(result, new_content); + drop(file); + tmp_dir.close().expect("error closing temporary directory"); + } } diff --git a/tests/helm/cert_manager.rs b/tests/helm/cert_manager.rs new file mode 100644 index 00000000..f356bc21 --- /dev/null +++ b/tests/helm/cert_manager.rs @@ -0,0 +1,348 @@ +use base64::decode; +use qovery_engine::cloud_provider::helm::{ + deploy_charts_levels, ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces, +}; +use 
qovery_engine::cmd::helm::Helm; +use qovery_engine::cmd::kubectl::{kubectl_exec_delete_namespace, kubectl_exec_get_secrets, kubectl_get_resource_yaml}; +use qovery_engine::cmd::structs::SecretItem; +use qovery_engine::fs::list_yaml_backup_files; +use serde_derive::Deserialize; +use serde_derive::Serialize; +use std::fs; +use std::fs::OpenOptions; +use std::io::{BufRead, BufReader}; +use std::path::{Path, PathBuf}; +use std::str::from_utf8; +use std::thread::sleep; +use std::time::Duration; +use tempdir::TempDir; +use test_utilities::utilities::FuncTestsSecrets; + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Certificate { + pub api_version: String, + pub items: Vec, + pub kind: String, + pub metadata: Metadata2, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Item { + pub api_version: String, + pub kind: String, + pub metadata: Metadata, + pub spec: Spec, + pub status: Status, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Metadata { + pub annotations: Annotations, + pub creation_timestamp: String, + pub generation: i64, + pub labels: Labels, + pub name: String, + pub namespace: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Annotations { + #[serde(rename = "meta.helm.sh/release-name")] + pub meta_helm_sh_release_name: String, + #[serde(rename = "meta.helm.sh/release-namespace")] + pub meta_helm_sh_release_namespace: String, + #[serde(default, rename = "kubectl.kubernetes.io/last-applied-configuration")] + pub last_applied_configuration: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Labels { + #[serde(rename = "app.kubernetes.io/managed-by")] + pub app_kubernetes_io_managed_by: String, 
+} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Spec { + pub dns_names: Vec, + pub issuer_ref: IssuerRef, + pub secret_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IssuerRef { + pub kind: String, + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Status {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Metadata2 { + pub self_link: String, +} + +fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) { + let vault_secrets = FuncTestsSecrets::new(); + let mut kube_config = dirs::home_dir().unwrap(); + kube_config.push(".kube/config"); + let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); + let cert_manager = CommonChart { + chart_info: ChartInfo { + name: "cert-manager".to_string(), + path: "lib/common/bootstrap/charts/cert-manager".to_string(), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + // https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + ChartSetValue { + key: "extraArgs".to_string(), + value: "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}" + .to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.enabled".to_string(), + // Due to cycle, prometheus need tls certificate from cert manager, and enabling this will require + // prometheus to be already installed + value: "false".to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.prometheusInstance".to_string(), + value: "qovery".to_string(), + }, + ], + 
..Default::default() + }, + }; + + let cert_manager_config = CommonChart { + chart_info: ChartInfo { + name: "cert-manager-configs".to_string(), + path: "lib/common/bootstrap/charts/cert-manager-configs".to_string(), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "externalDnsProvider".to_string(), + value: "cloudflare".to_string(), + }, + ChartSetValue { + key: "provider.cloudflare.apiToken".to_string(), + value: vault_secrets.CLOUDFLARE_TOKEN.unwrap().to_string(), + }, + ChartSetValue { + key: "provider.cloudflare.email".to_string(), + value: vault_secrets.CLOUDFLARE_ID.as_ref().unwrap().to_string(), + }, + ChartSetValue { + key: "acme.letsEncrypt.emailReport".to_string(), + value: vault_secrets.CLOUDFLARE_ID.unwrap().to_string(), + }, + ChartSetValue { + key: "acme.letsEncrypt.acmeUrl".to_string(), + value: "https://acme-staging-v02.api.letsencrypt.org/directory".to_string(), + }, + ], + ..Default::default() + }, + }; + + (helm, kube_config, cert_manager, cert_manager_config) +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_create_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + let backup_infos = helm + .prepare_chart_backup(root_dir_path, &cert_manager.chart_info, &vec![], vec!["cert".to_string()]) + .unwrap(); + let secrets = kubectl_exec_get_secrets( + kube_config.as_path(), + cert_manager.chart_info.namespace.to_string().as_str(), + 
"", + vec![], + ) + .unwrap(); + assert_eq!(backup_infos.len(), 1); + + for backup_info in backup_infos { + let backup_name = format!("{}-{}-q-backup", &cert_manager.chart_info.name, backup_info.name.clone()); + assert!(Path::new(backup_info.path.as_str()).exists()); + let secret = secrets + .items + .clone() + .into_iter() + .filter(|secret| secret.metadata.name == backup_name) + .collect::>(); + let secret_content = decode(secret[0].data[&backup_info.name].clone()).unwrap(); + let content = from_utf8(secret_content.as_slice()).unwrap().to_string(); + let file = OpenOptions::new().read(true).open(backup_info.path.as_str()).unwrap(); + let file_content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>() + .join("\n"); + assert_ne!(content.len(), 0); + assert_ne!(file_content.len(), 0); + assert!(content.contains(&file_content)); + } + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_apply_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + let _ = helm + .prepare_chart_backup( + root_dir_path, + cert_manager_config.get_chart_info(), + &vec![], + vec!["cert".to_string()], + ) + .unwrap(); + + match helm.apply_chart_backup(root_dir_path, &vec![], cert_manager_config.get_chart_info()) { + Err(_) => { + assert!(false) + } + Ok(..) 
=> { + let string_path = list_yaml_backup_files(root_dir_path).unwrap().first().unwrap().clone(); + let str_path = string_path.as_str(); + let path = Path::new(str_path); + let backup_string = fs::read_to_string(path).unwrap(); + let cert_string = kubectl_get_resource_yaml( + kube_config.as_path(), + vec![], + "cert", + Some(cert_manager_config.namespace().as_str()), + ) + .unwrap(); + let backup_cert = serde_yaml::from_str::(backup_string.as_str()).unwrap(); + let cert = serde_yaml::from_str::(cert_string.as_str()).unwrap(); + assert_eq!(backup_cert.items.first().unwrap().spec, cert.items.first().unwrap().spec) + } + }; + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_should_not_create_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + + // trying to create a backup from an unknown (toto) resource + let backup_infos = helm + .prepare_chart_backup(root_dir_path, &cert_manager.chart_info, &vec![], vec!["toto".to_string()]) + .unwrap(); + + assert_eq!(backup_infos.len(), 0); + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_should_apply_chart_backup() { + let (helm, kube_config, cert_manager, mut cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; 
+ let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + cert_manager_config.chart_info.backup_resources = Some(vec!["cert".to_string()]); + + let lvl_2_bis: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2_bis], false).map_err(|_| assert!(false)); + + let secrets = kubectl_exec_get_secrets( + kube_config.as_path(), + cert_manager.chart_info.namespace.to_string().as_str(), + "", + vec![], + ) + .unwrap(); + + let cert_secret = secrets + .items + .into_iter() + .filter(|secret| secret.metadata.name == "cert-manager-configs-cert-q-backup") + .collect::>(); + + assert_eq!(cert_secret.len(), 0); + + let cert_string = kubectl_get_resource_yaml( + kube_config.as_path(), + vec![], + "cert", + Some(cert_manager_config.namespace().as_str()), + ) + .unwrap(); + let cert = serde_yaml::from_str::(cert_string.as_str()).unwrap(); + + assert_ne!(cert.items[0].metadata.annotations.last_applied_configuration, ""); + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} diff --git a/tests/helm/mod.rs b/tests/helm/mod.rs new file mode 100644 index 00000000..3a720572 --- /dev/null +++ b/tests/helm/mod.rs @@ -0,0 +1 @@ +mod cert_manager; diff --git a/tests/lib.rs b/tests/lib.rs index bbc13eb3..48aaa913 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -3,4 +3,5 @@ extern crate maplit; mod aws; mod digitalocean; +mod helm; mod scaleway; From 47f17566d49ebfd02e7e2d6a8247898e38ec8efc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Thu, 5 May 2022 10:06:09 +0200 Subject: [PATCH 111/122] Fix: Advance settings use correct names (#706) --- 
src/io_models.rs | 18 +++++++++++------- src/models/application.rs | 10 +++++----- test_utilities/src/common.rs | 20 ++++++++++---------- 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/src/io_models.rs b/src/io_models.rs index 06f6bb4a..b37ab285 100644 --- a/src/io_models.rs +++ b/src/io_models.rs @@ -189,14 +189,18 @@ pub struct Port { } #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] -pub struct ApplicationAdvanceSettings { +pub struct ApplicationAdvancedSettings { + #[serde(alias = "deployment.delay_start_time_sec")] pub deployment_delay_start_time_sec: u32, + #[serde(alias = "build.timeout_max_sec")] + pub build_timeout_max_sec: u32, } -impl Default for ApplicationAdvanceSettings { +impl Default for ApplicationAdvancedSettings { fn default() -> Self { - ApplicationAdvanceSettings { + ApplicationAdvancedSettings { deployment_delay_start_time_sec: 30, + build_timeout_max_sec: 30 * 60, // 30min } } } @@ -225,7 +229,7 @@ pub struct Application { /// Use BTreeMap to get Hash trait which is not available on HashMap pub environment_vars: BTreeMap, #[serde(default)] - pub advance_settings: ApplicationAdvanceSettings, + pub advanced_settings: ApplicationAdvancedSettings, } impl Application { @@ -254,7 +258,7 @@ impl Application { build, self.storage.iter().map(|s| s.to_aws_storage()).collect::>(), environment_variables, - self.advance_settings.clone(), + self.advanced_settings.clone(), AwsAppExtraSettings {}, listeners, logger.clone(), @@ -273,7 +277,7 @@ impl Application { build, self.storage.iter().map(|s| s.to_do_storage()).collect::>(), environment_variables, - self.advance_settings.clone(), + self.advanced_settings.clone(), DoAppExtraSettings {}, listeners, logger.clone(), @@ -292,7 +296,7 @@ impl Application { build, self.storage.iter().map(|s| s.to_scw_storage()).collect::>(), environment_variables, - self.advance_settings.clone(), + self.advanced_settings.clone(), ScwAppExtraSettings {}, listeners, logger.clone(), diff --git 
a/src/models/application.rs b/src/models/application.rs index 3a3c5218..8f43b41f 100644 --- a/src/models/application.rs +++ b/src/models/application.rs @@ -12,7 +12,7 @@ use crate::cloud_provider::DeploymentTarget; use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset}; use crate::errors::EngineError; use crate::events::{EnvironmentStep, EventDetails, Stage, ToTransmitter, Transmitter}; -use crate::io_models::{ApplicationAdvanceSettings, Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; +use crate::io_models::{ApplicationAdvancedSettings, Context, Listen, Listener, Listeners, Port, QoveryIdentifier}; use crate::logger::Logger; use crate::models::types::{CloudProvider, ToTeraContext}; use crate::utilities::to_short_id; @@ -45,7 +45,7 @@ pub struct Application { pub(super) environment_variables: Vec, pub(super) listeners: Listeners, pub(super) logger: Box, - pub(super) advance_settings: ApplicationAdvanceSettings, + pub(super) advanced_settings: ApplicationAdvancedSettings, pub(super) _extra_settings: T::AppExtraSettings, } @@ -65,7 +65,7 @@ impl Application { build: Build, storage: Vec>, environment_variables: Vec, - advance_settings: ApplicationAdvanceSettings, + advance_settings: ApplicationAdvancedSettings, extra_settings: T::AppExtraSettings, listeners: Listeners, logger: Box, @@ -90,7 +90,7 @@ impl Application { environment_variables, listeners, logger, - advance_settings, + advanced_settings: advance_settings, _extra_settings: extra_settings, }) } @@ -128,7 +128,7 @@ impl Application { context.insert("image_name_with_tag", &self.build.image.full_image_name_with_tag()); context.insert( "start_timeout_in_seconds", - &self.advance_settings.deployment_delay_start_time_sec, + &self.advanced_settings.deployment_delay_start_time_sec, ); let environment_variables = self diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index c0e947f7..7946c4d7 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ 
-305,7 +305,7 @@ pub fn environment_3_apps_3_routers_3_databases( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }, Application { long_id: Uuid::new_v4(), @@ -352,7 +352,7 @@ pub fn environment_3_apps_3_routers_3_databases( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }, Application { long_id: Uuid::new_v4(), @@ -401,7 +401,7 @@ pub fn environment_3_apps_3_routers_3_databases( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }, ], routers: vec![ @@ -561,7 +561,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![Router { long_id: Uuid::new_v4(), @@ -615,7 +615,7 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { min_instances: 1, max_instances: 1, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![], databases: vec![], @@ -771,7 +771,7 @@ pub fn environnement_2_app_2_routers_1_psql( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }, Application { long_id: Uuid::new_v4(), @@ -818,7 +818,7 @@ pub fn environnement_2_app_2_routers_1_psql( min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }, ], routers: vec![ @@ -915,7 +915,7 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - 
advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![Router { long_id: Uuid::new_v4(), @@ -976,7 +976,7 @@ pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![], databases: vec![], @@ -1026,7 +1026,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) min_instances: 2, max_instances: 2, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![Router { long_id: Uuid::new_v4(), From 91749538b71bc733b5280c98451820033f632e29 Mon Sep 17 00:00:00 2001 From: Pierre Mavro Date: Thu, 5 May 2022 12:20:09 +0200 Subject: [PATCH 112/122] feat: add tests build to build pipeline --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ba465df9..df94bc3d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,7 +43,7 @@ jobs: sccache --version sccache --show-stats echo "########## START BUILD ##########" - cargo build --all-features + cargo build --all-features --tests sccache --show-stats echo "########## START UNIT TESTS ##########" cargo test From b315a61b381bee8fdf33e4b6d6c7d2581fa199ec Mon Sep 17 00:00:00 2001 From: Benjamin Chastanier Date: Thu, 5 May 2022 12:55:13 +0200 Subject: [PATCH 113/122] tests: fixing lint and build --- src/cmd/helm_utils.rs | 24 +++++++++++------------- test_utilities/src/common.rs | 34 ++++++++++++++++++++++++++-------- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/cmd/helm_utils.rs b/src/cmd/helm_utils.rs index 2d894b0f..27fe6cec 100644 --- a/src/cmd/helm_utils.rs +++ b/src/cmd/helm_utils.rs @@ -388,20 +388,18 @@ where let mut need_backup = false; let 
root_dir_path = std::env::temp_dir(); - if chart.backup_resources.is_some() { - if installed_version.le(&get_common_helm_chart_version(&chart)?) { - if let Err(e) = prepare_chart_backup( - kubernetes_config, - root_dir_path.as_path(), - &chart, - envs, - chart.backup_resources.as_ref().unwrap().to_vec(), - ) { - return Err(e); - }; + if chart.backup_resources.is_some() && installed_version.le(&get_common_helm_chart_version(&chart)?) { + if let Err(e) = prepare_chart_backup( + kubernetes_config, + root_dir_path.as_path(), + &chart, + envs, + chart.backup_resources.as_ref().unwrap().to_vec(), + ) { + return Err(e); + }; - need_backup = true; - } + need_backup = true; } Ok(BackupStatus { diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 7946c4d7..549eb9e8 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -657,7 +657,7 @@ pub fn database_test_environment_on_upgrade(context: &Context) -> EnvironmentReq min_instances: 1, max_instances: 1, cpu_burst: "100m".to_string(), - advance_settings: Default::default(), + advanced_settings: Default::default(), }], routers: vec![], databases: vec![], @@ -1891,24 +1891,33 @@ pub fn test_db_on_upgrade( &context, logger.clone(), localisation.as_str(), + KubernetesKind::Eks, kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), Kind::Do => DO::docker_cr_engine( &context, logger.clone(), localisation.as_str(), + KubernetesKind::Doks, kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), Kind::Scw => Scaleway::docker_cr_engine( &context, logger.clone(), localisation.as_str(), + KubernetesKind::ScwKapsule, kubernetes_version.clone(), - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context.cluster_id().to_string(), + }, None, ), }; @@ -1971,29 +1980,38 @@ pub fn test_db_on_upgrade( 
&context_for_delete, logger.clone(), localisation.as_str(), + KubernetesKind::Eks, kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context_for_delete.cluster_id().to_string(), + }, None, ), Kind::Do => DO::docker_cr_engine( &context_for_delete, logger.clone(), localisation.as_str(), + KubernetesKind::Doks, kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context_for_delete.cluster_id().to_string(), + }, None, ), Kind::Scw => Scaleway::docker_cr_engine( &context_for_delete, logger.clone(), localisation.as_str(), + KubernetesKind::ScwKapsule, kubernetes_version, - &ClusterDomain::Default, + &ClusterDomain::Default { + cluster_id: context_for_delete.cluster_id().to_string(), + }, None, ), }; - // let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); + let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); return test_name.to_string(); From ebe76a512902c600e0cfc81c1dd0640ad1e6d350 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Fri, 6 May 2022 16:26:48 +0200 Subject: [PATCH 114/122] fix linter --- test_utilities/src/aws.rs | 21 ++- test_utilities/src/common.rs | 248 ++++++++++++++--------------- test_utilities/src/digitalocean.rs | 7 +- test_utilities/src/scaleway.rs | 15 +- test_utilities/src/utilities.rs | 147 ++++++++--------- 5 files changed, 208 insertions(+), 230 deletions(-) diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 8f126aa3..185b7ad5 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -26,8 +26,7 @@ pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3; pub const AWS_TEST_REGION: AwsRegion = AwsRegion::EuWest3; pub const AWS_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const AWS_KUBERNETES_MINOR_VERSION: u8 = 19; -pub const AWS_KUBERNETES_VERSION: &'static str = - 
formatcp!("{}.{}", AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION); +pub const AWS_KUBERNETES_VERSION: &str = formatcp!("{}.{}", AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION); pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; @@ -59,7 +58,7 @@ pub fn container_registry_ecr(context: &Context, logger: Box) -> ECR pub fn aws_default_engine_config(context: &Context, logger: Box) -> EngineConfig { AWS::docker_cr_engine( - &context, + context, logger, AWS_TEST_REGION.to_string().as_str(), KubernetesKind::Eks, @@ -216,19 +215,19 @@ impl Cluster for AWS { .EKS_ACCESS_CIDR_BLOCKS .as_ref() .unwrap() - .replace("\"", "") - .replace("[", "") - .replace("]", "") - .split(",") + .replace('\"', "") + .replace('[', "") + .replace(']', "") + .split(',') .map(|c| c.to_string()) .collect(), ec2_access_cidr_blocks: secrets .EKS_ACCESS_CIDR_BLOCKS // FIXME ? use an EC2_ACCESS_CIDR_BLOCKS? 
.unwrap() - .replace("\"", "") - .replace("[", "") - .replace("]", "") - .split(",") + .replace('\"', "") + .replace('[', "") + .replace(']', "") + .split(',') .map(|c| c.to_string()) .collect(), rds_cidr_subnet: "23".to_string(), diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 549eb9e8..2dc21207 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -220,9 +220,9 @@ pub fn environment_3_apps_3_routers_3_databases( database_disk_type: &str, provider_kind: Kind, ) -> EnvironmentRequest { - let app_name_1 = format!("{}-{}", "simple-app-1".to_string(), generate_id()); - let app_name_2 = format!("{}-{}", "simple-app-2".to_string(), generate_id()); - let app_name_3 = format!("{}-{}", "simple-app-3".to_string(), generate_id()); + let app_name_1 = format!("{}-{}", "simple-app-1", generate_id()); + let app_name_2 = format!("{}-{}", "simple-app-2", generate_id()); + let app_name_3 = format!("{}-{}", "simple-app-3", generate_id()); // mongoDB management part let database_host_mongo = get_svc_name(DatabaseKind::Mongodb, provider_kind.clone()).to_string(); @@ -248,7 +248,7 @@ pub fn environment_3_apps_3_routers_3_databases( let database_name = "postgres".to_string(); // pSQL 2 management part - let fqdn_2 = format!("{}2", get_svc_name(DatabaseKind::Postgresql, provider_kind.clone())); + let fqdn_2 = format!("{}2", get_svc_name(DatabaseKind::Postgresql, provider_kind)); let database_username_2 = "superuser2".to_string(); let database_name_2 = "postgres2".to_string(); @@ -380,9 +380,9 @@ pub fn environment_3_apps_3_routers_3_databases( environment_vars: btreemap! 
{ "IS_DOCUMENTDB".to_string() => base64::encode("false"), "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string() => base64::encode(database_host_mongo.clone()), - "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string() => base64::encode(database_uri_mongo.clone()), + "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string() => base64::encode(database_uri_mongo), "QOVERY_DATABASE_TESTING_DATABASE_PORT".to_string() => base64::encode(database_port_mongo.to_string()), - "MONGODB_DBNAME".to_string() => base64::encode(&database_db_name_mongo.clone()), + "MONGODB_DBNAME".to_string() => base64::encode(&database_db_name_mongo), "QOVERY_DATABASE_TESTING_DATABASE_USERNAME".to_string() => base64::encode(database_username_mongo.clone()), "QOVERY_DATABASE_TESTING_DATABASE_PASSWORD".to_string() => base64::encode(database_password_mongo.clone()), }, @@ -409,12 +409,12 @@ pub fn environment_3_apps_3_routers_3_databases( long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/app1".to_string(), - application_name: app_name_1.clone(), + application_name: app_name_1, }], sticky_sessions_enabled: false, }, @@ -422,12 +422,12 @@ pub fn environment_3_apps_3_routers_3_databases( long_id: Uuid::new_v4(), name: "second-router".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/app2".to_string(), - application_name: app_name_2.clone(), + application_name: app_name_2, }], sticky_sessions_enabled: false, }, @@ -435,12 +435,12 @@ pub fn environment_3_apps_3_routers_3_databases( long_id: 
Uuid::new_v4(), name: "third-router".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/app3".to_string(), - application_name: app_name_3.clone(), + application_name: app_name_3, }], sticky_sessions_enabled: false, }, @@ -450,12 +450,12 @@ pub fn environment_3_apps_3_routers_3_databases( kind: DatabaseKind::Postgresql, action: Action::Create, long_id: Uuid::new_v4(), - name: database_name.clone(), + name: database_name, version: "11.8.0".to_string(), fqdn_id: fqdn.clone(), - fqdn: fqdn.clone(), - port: database_port.clone(), - username: database_username.clone(), + fqdn, + port: database_port, + username: database_username, password: database_password.clone(), total_cpus: "100m".to_string(), total_ram_in_mib: 512, @@ -472,13 +472,13 @@ pub fn environment_3_apps_3_routers_3_databases( kind: DatabaseKind::Postgresql, action: Action::Create, long_id: Uuid::new_v4(), - name: database_name_2.clone(), + name: database_name_2, version: "11.8.0".to_string(), fqdn_id: fqdn_2.clone(), - fqdn: fqdn_2.clone(), - port: database_port.clone(), - username: database_username_2.clone(), - password: database_password.clone(), + fqdn: fqdn_2, + port: database_port, + username: database_username_2, + password: database_password, total_cpus: "100m".to_string(), total_ram_in_mib: 512, disk_size_in_gib: 10, @@ -494,13 +494,13 @@ pub fn environment_3_apps_3_routers_3_databases( kind: DatabaseKind::Mongodb, action: Action::Create, long_id: Uuid::new_v4(), - name: database_db_name_mongo.clone(), + name: database_db_name_mongo, version: version_mongo.to_string(), fqdn_id: database_host_mongo.clone(), - fqdn: database_host_mongo.clone(), - port: database_port_mongo.clone(), - username: database_username_mongo.clone(), - password: 
database_password_mongo.clone(), + fqdn: database_host_mongo, + port: database_port_mongo, + username: database_username_mongo, + password: database_password_mongo, total_cpus: "100m".to_string(), total_ram_in_mib: 512, disk_size_in_gib: 10, @@ -520,9 +520,9 @@ pub fn environment_3_apps_3_routers_3_databases( pub fn working_minimal_environment(context: &Context, test_domain: &str) -> EnvironmentRequest { let suffix = generate_id(); let application_id = generate_id(); - let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); + let application_name = format!("{}-{}", "simple-app", &suffix); let router_name = "main".to_string(); - let application_domain = format!("{}.{}.{}", application_id, context.cluster_id().to_string(), test_domain); + let application_domain = format!("{}.{}.{}", application_id, context.cluster_id(), test_domain); EnvironmentRequest { execution_id: context.execution_id().to_string(), id: generate_id(), @@ -572,7 +572,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi custom_domains: vec![], routes: vec![Route { path: "/".to_string(), - application_name: format!("{}-{}", "simple-app".to_string(), &suffix), + application_name: format!("{}-{}", "simple-app", &suffix), }], sticky_sessions_enabled: false, }], @@ -583,7 +583,7 @@ pub fn working_minimal_environment(context: &Context, test_domain: &str) -> Envi pub fn database_test_environment(context: &Context) -> EnvironmentRequest { let suffix = generate_id(); - let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); + let application_name = format!("{}-{}", "simple-app", &suffix); EnvironmentRequest { execution_id: context.execution_id().to_string(), @@ -625,7 +625,7 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { pub fn database_test_environment_on_upgrade(context: &Context) -> EnvironmentRequest { let suffix = "c3dn5so3dltod3s"; - let application_name = format!("{}-{}", "simple-app".to_string(), 
&suffix); + let application_name = format!("{}-{}", "simple-app", &suffix); EnvironmentRequest { execution_id: context.execution_id().to_string(), @@ -669,7 +669,7 @@ pub fn environment_only_http_server_router_with_sticky_session( context: &Context, test_domain: &str, ) -> EnvironmentRequest { - let mut env = environment_only_http_server_router(context, test_domain.clone()); + let mut env = environment_only_http_server_router(context, test_domain); for mut router in &mut env.routers { router.sticky_sessions_enabled = true; @@ -689,7 +689,7 @@ pub fn environnement_2_app_2_routers_1_psql( let database_port = 5432; let database_username = "superuser".to_string(); - let database_password = generate_password(provider_kind.clone(), DatabaseMode::CONTAINER); + let database_password = generate_password(provider_kind, DatabaseMode::CONTAINER); let database_name = "postgres".to_string(); let suffix = generate_id(); @@ -711,7 +711,7 @@ pub fn environnement_2_app_2_routers_1_psql( version: "11.8.0".to_string(), fqdn_id: fqdn.clone(), fqdn: fqdn.clone(), - port: database_port.clone(), + port: database_port, username: database_username.clone(), password: database_password.clone(), total_cpus: "100m".to_string(), @@ -797,11 +797,11 @@ pub fn environnement_2_app_2_routers_1_psql( snapshot_retention_in_days: 0, }], environment_vars: btreemap! 
{ - "PG_DBNAME".to_string() => base64::encode(database_name.clone()), - "PG_HOST".to_string() => base64::encode(fqdn.clone()), + "PG_DBNAME".to_string() => base64::encode(database_name), + "PG_HOST".to_string() => base64::encode(fqdn), "PG_PORT".to_string() => base64::encode(database_port.to_string()), - "PG_USERNAME".to_string() => base64::encode(database_username.clone()), - "PG_PASSWORD".to_string() => base64::encode(database_password.clone()), + "PG_USERNAME".to_string() => base64::encode(database_username), + "PG_PASSWORD".to_string() => base64::encode(database_password), }, branch: "master".to_string(), ports: vec![Port { @@ -826,12 +826,12 @@ pub fn environnement_2_app_2_routers_1_psql( long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/".to_string(), - application_name: application_name1.to_string(), + application_name: application_name1, }], sticky_sessions_enabled: false, }, @@ -839,12 +839,12 @@ pub fn environnement_2_app_2_routers_1_psql( long_id: Uuid::new_v4(), name: "second-router".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/coco".to_string(), - application_name: application_name2.to_string(), + application_name: application_name2, }], sticky_sessions_enabled: false, }, @@ -883,7 +883,7 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment action: Action::Create, applications: vec![Application { long_id: Uuid::new_v4(), - name: format!("{}-{}", "echo-app".to_string(), &suffix), + name: 
format!("{}-{}", "echo-app", &suffix), /*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "2205adea1db295547b99f7b17229afd7e879b6ff".to_string(), @@ -921,12 +921,12 @@ pub fn echo_app_environment(context: &Context, test_domain: &str) -> Environment long_id: Uuid::new_v4(), name: "main".to_string(), action: Action::Create, - default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id().to_string(), test_domain), + default_domain: format!("{}.{}.{}", generate_id(), context.cluster_id(), test_domain), public_port: 443, custom_domains: vec![], routes: vec![Route { path: "/".to_string(), - application_name: format!("{}-{}", "echo-app".to_string(), &suffix), + application_name: format!("{}-{}", "echo-app", &suffix), }], sticky_sessions_enabled: false, }], @@ -946,7 +946,7 @@ pub fn environment_only_http_server(context: &Context) -> EnvironmentRequest { action: Action::Create, applications: vec![Application { long_id: Uuid::new_v4(), - name: format!("{}-{}", "mini-http".to_string(), &suffix), + name: format!("{}-{}", "mini-http", &suffix), /*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "a873edd459c97beb51453db056c40bca85f36ef9".to_string(), @@ -996,7 +996,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) action: Action::Create, applications: vec![Application { long_id: id, - name: format!("{}-{}", "mini-http".to_string(), &suffix), + name: format!("{}-{}", "mini-http", &suffix), /*name: "simple-app".to_string(),*/ git_url: "https://github.com/Qovery/engine-testing.git".to_string(), commit_id: "a873edd459c97beb51453db056c40bca85f36ef9".to_string(), @@ -1037,7 +1037,7 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str) custom_domains: vec![], routes: vec![Route { path: "/".to_string(), - application_name: format!("{}-{}", "mini-http".to_string(), &suffix), + 
application_name: format!("{}-{}", "mini-http", &suffix), }], sticky_sessions_enabled: false, }], @@ -1102,7 +1102,7 @@ pub fn test_db( let database_password = generate_password(provider_kind.clone(), database_mode.clone()); let db_kind_str = db_kind.name().to_string(); let db_id = generate_id(); - let database_host = format!("{}-{}", db_id, db_kind_str.clone()); + let database_host = format!("{}-{}", db_id, db_kind_str); let database_fqdn = format!( "{}.{}.{}", database_host, @@ -1125,41 +1125,41 @@ pub fn test_db( database_host.clone() }, ); - let database_port = db_infos.db_port.clone(); + let database_port = db_infos.db_port; let storage_size = 10; let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone()); let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone()); let db = Database { - kind: db_kind.clone(), + kind: db_kind, action: Action::Create, long_id: Uuid::new_v4(), - name: db_id.clone(), + name: db_id, version: version.to_string(), fqdn_id: database_host.clone(), fqdn: database_fqdn.clone(), - port: database_port.clone(), - username: database_username.clone(), - password: database_password.clone(), + port: database_port, + username: database_username, + password: database_password, total_cpus: "50m".to_string(), total_ram_in_mib: 256, - disk_size_in_gib: storage_size.clone(), - database_instance_type: db_instance_type.to_string(), - database_disk_type: db_disk_type.to_string(), + disk_size_in_gib: storage_size, + database_instance_type: db_instance_type, + database_disk_type: db_disk_type, encrypt_disk: true, activate_high_availability: false, activate_backups: false, - publicly_accessible: is_public.clone(), + publicly_accessible: is_public, mode: database_mode.clone(), }; - environment.databases = vec![db.clone()]; + environment.databases = vec![db]; - let app_name = format!("{}-app-{}", db_kind_str.clone(), generate_id()); + let app_name = format!("{}-app-{}", db_kind_str, 
generate_id()); environment.applications = environment .applications .into_iter() .map(|mut app| { - app.long_id = app_id.clone(); + app.long_id = app_id; app.name = to_short_id(&app_id); app.branch = app_name.clone(); app.commit_id = db_infos.app_commit.clone(); @@ -1228,34 +1228,33 @@ pub fn test_db( let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - match database_mode.clone() { + match database_mode { DatabaseMode::CONTAINER => { match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) ), - Err(_) => assert!(false), + Err(_) => panic!(), }; - match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + match get_svc(context.clone(), provider_kind.clone(), environment, secrets) { Ok(svc) => assert_eq!( svc.items .expect("No items in svc") .into_iter() .filter(|svc| svc.metadata.name == database_host && &svc.spec.svc_type == "LoadBalancer") - .collect::>() - .len(), + .count(), match is_public { true => 1, false => 0, } ), - Err(_) => assert!(false), + Err(_) => panic!(), }; } DatabaseMode::MANAGED => { - match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + match get_svc(context.clone(), provider_kind.clone(), environment, secrets) { Ok(svc) => { let service = svc .items @@ -1273,7 +1272,7 @@ pub fn test_db( false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")), } } - Err(_) => assert!(false), + Err(_) => panic!(), }; } } @@ -1287,9 +1286,7 @@ pub fn test_db( localisation.as_str(), KubernetesKind::Eks, kubernetes_version, - &ClusterDomain::Default { - cluster_id: cluster_id.to_string(), - }, + &ClusterDomain::Default { cluster_id }, None, ), Kind::Do => DO::docker_cr_engine( @@ -1298,9 +1295,7 @@ pub fn test_db( 
localisation.as_str(), KubernetesKind::Doks, kubernetes_version, - &ClusterDomain::Default { - cluster_id: cluster_id.to_string(), - }, + &ClusterDomain::Default { cluster_id }, None, ), Kind::Scw => Scaleway::docker_cr_engine( @@ -1309,9 +1304,7 @@ pub fn test_db( localisation.as_str(), KubernetesKind::ScwKapsule, kubernetes_version, - &ClusterDomain::Default { - cluster_id: cluster_id.to_string(), - }, + &ClusterDomain::Default { cluster_id }, None, ), }; @@ -1319,10 +1312,10 @@ pub fn test_db( let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() } -pub fn get_environment_test_kubernetes<'a>( +pub fn get_environment_test_kubernetes( context: &Context, cloud_provider: Arc>, kubernetes_kind: KubernetesKind, @@ -1338,8 +1331,8 @@ pub fn get_environment_test_kubernetes<'a>( KubernetesKind::Eks => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); - if vpc_network_mode.is_some() { - options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + if let Some(vpc_network_mode) = vpc_network_mode { + options.vpc_qovery_network_mode = vpc_network_mode; } Box::new( @@ -1363,8 +1356,8 @@ pub fn get_environment_test_kubernetes<'a>( KubernetesKind::Ec2 => { let region = AwsRegion::from_str(localisation).expect("AWS region not supported"); let mut options = AWS::kubernetes_cluster_options(secrets, None); - if vpc_network_mode.is_some() { - options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + if let Some(vpc_network_mode) = vpc_network_mode { + options.vpc_qovery_network_mode = vpc_network_mode; } Box::new( @@ -1397,7 +1390,7 @@ pub fn get_environment_test_kubernetes<'a>( cloud_provider, dns_provider, DO::kubernetes_nodes(), - DO::kubernetes_cluster_options(secrets.clone(), 
Option::from(context.cluster_id().to_string())), + DO::kubernetes_cluster_options(secrets, Option::from(context.cluster_id().to_string())), logger, ) .unwrap(), @@ -1424,7 +1417,7 @@ pub fn get_environment_test_kubernetes<'a>( } }; - return kubernetes; + kubernetes } pub fn get_cluster_test_kubernetes<'a>( @@ -1445,8 +1438,8 @@ pub fn get_cluster_test_kubernetes<'a>( KubernetesKind::Eks => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); - if vpc_network_mode.is_some() { - options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + if let Some(vpc_network_mode) = vpc_network_mode { + options.vpc_qovery_network_mode = vpc_network_mode; } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); @@ -1457,7 +1450,7 @@ pub fn get_cluster_test_kubernetes<'a>( uuid::Uuid::new_v4(), cluster_name.as_str(), boot_version.as_str(), - aws_region.clone(), + aws_region, aws_zones, cloud_provider, dns_provider, @@ -1471,8 +1464,8 @@ pub fn get_cluster_test_kubernetes<'a>( KubernetesKind::Ec2 => { let mut options = AWS::kubernetes_cluster_options(secrets, None); let aws_region = AwsRegion::from_str(localisation).expect("expected correct AWS region"); - if vpc_network_mode.is_some() { - options.vpc_qovery_network_mode = vpc_network_mode.expect("No vpc network mode"); + if let Some(vpc_network_mode) = vpc_network_mode { + options.vpc_qovery_network_mode = vpc_network_mode; } let aws_zones = aws_zones.unwrap().into_iter().map(|zone| zone.to_string()).collect(); @@ -1483,7 +1476,7 @@ pub fn get_cluster_test_kubernetes<'a>( uuid::Uuid::new_v4(), cluster_name.as_str(), boot_version.as_str(), - aws_region.clone(), + aws_region, aws_zones, cloud_provider, dns_provider, @@ -1496,11 +1489,11 @@ pub fn get_cluster_test_kubernetes<'a>( KubernetesKind::Doks => Box::new( DOKS::new( context.clone(), - cluster_id.clone(), + cluster_id, 
uuid::Uuid::new_v4(), cluster_name.clone(), boot_version, - DoRegion::from_str(localisation.clone()).expect("Unknown region set for DOKS"), + DoRegion::from_str(localisation).expect("Unknown region set for DOKS"), cloud_provider, dns_provider, DO::kubernetes_nodes(), @@ -1512,11 +1505,11 @@ pub fn get_cluster_test_kubernetes<'a>( KubernetesKind::ScwKapsule => Box::new( Kapsule::new( context.clone(), - cluster_id.clone(), + cluster_id, uuid::Uuid::new_v4(), - cluster_name.clone(), + cluster_name, boot_version, - ScwZone::from_str(localisation.clone()).expect("Unknown zone set for Kapsule"), + ScwZone::from_str(localisation).expect("Unknown zone set for Kapsule"), cloud_provider, dns_provider, Scaleway::kubernetes_nodes(), @@ -1527,7 +1520,7 @@ pub fn get_cluster_test_kubernetes<'a>( ), }; - return kubernetes; + kubernetes } pub fn cluster_test( @@ -1584,8 +1577,8 @@ pub fn cluster_test( let mut delete_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut aws_zones_string: Vec = Vec::with_capacity(3); - if aws_zones.is_some() { - for zone in aws_zones.clone().unwrap() { + if let Some(aws_zones) = aws_zones { + for zone in aws_zones { aws_zones_string.push(zone.to_string()) } }; @@ -1659,7 +1652,7 @@ pub fn cluster_test( } } ClusterTestType::WithUpgrade => { - let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version.clone() + 1); + let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version + 1); let engine = match provider_kind { Kind::Aws => AWS::docker_cr_engine( &context, @@ -1668,7 +1661,7 @@ pub fn cluster_test( KubernetesKind::Eks, upgrade_to_version, cluster_domain, - vpc_network_mode.clone(), + vpc_network_mode, ), Kind::Do => DO::docker_cr_engine( &context, @@ -1677,7 +1670,7 @@ pub fn cluster_test( KubernetesKind::Doks, upgrade_to_version, cluster_domain, - vpc_network_mode.clone(), + vpc_network_mode, ), Kind::Scw => Scaleway::docker_cr_engine( &context, @@ 
-1686,7 +1679,7 @@ pub fn cluster_test( KubernetesKind::ScwKapsule, upgrade_to_version, cluster_domain, - vpc_network_mode.clone(), + vpc_network_mode, ), }; let mut upgrade_tx = @@ -1799,7 +1792,7 @@ pub fn test_db_on_upgrade( let database_password = "uxoyf358jojkemj".to_string(); let db_kind_str = db_kind.name().to_string(); let db_id = "c2dn5so3dltod3s".to_string(); - let database_host = format!("{}-{}", db_id, db_kind_str.clone()); + let database_host = format!("{}-{}", db_id, db_kind_str); let database_fqdn = format!( "{}.{}.{}", database_host, @@ -1822,41 +1815,41 @@ pub fn test_db_on_upgrade( database_host.clone() }, ); - let database_port = db_infos.db_port.clone(); + let database_port = db_infos.db_port; let storage_size = 10; let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone()); let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone()); let db = Database { - kind: db_kind.clone(), + kind: db_kind, action: Action::Create, long_id: Uuid::from_str("7d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(), - name: db_id.clone(), + name: db_id, version: version.to_string(), fqdn_id: database_host.clone(), fqdn: database_fqdn.clone(), - port: database_port.clone(), - username: database_username.clone(), - password: database_password.clone(), + port: database_port, + username: database_username, + password: database_password, total_cpus: "50m".to_string(), total_ram_in_mib: 256, - disk_size_in_gib: storage_size.clone(), - database_instance_type: db_instance_type.to_string(), - database_disk_type: db_disk_type.to_string(), + disk_size_in_gib: storage_size, + database_instance_type: db_instance_type, + database_disk_type: db_disk_type, encrypt_disk: true, activate_high_availability: false, activate_backups: false, - publicly_accessible: is_public.clone(), + publicly_accessible: is_public, mode: database_mode.clone(), }; - environment.databases = vec![db.clone()]; + environment.databases = vec![db]; - let 
app_name = format!("{}-app-{}", db_kind_str.clone(), generate_id()); + let app_name = format!("{}-app-{}", db_kind_str, generate_id()); environment.applications = environment .applications .into_iter() .map(|mut app| { - app.long_id = app_id.clone(); + app.long_id = app_id; app.name = to_short_id(&app_id); app.branch = app_name.clone(); app.commit_id = db_infos.app_commit.clone(); @@ -1925,34 +1918,33 @@ pub fn test_db_on_upgrade( let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); assert!(matches!(ret, TransactionResult::Ok)); - match database_mode.clone() { + match database_mode { DatabaseMode::CONTAINER => { match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { Ok(pvc) => assert_eq!( pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, format!("{}Gi", storage_size) ), - Err(_) => assert!(false), + Err(_) => panic!(), }; - match get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + match get_svc(context, provider_kind.clone(), environment, secrets) { Ok(svc) => assert_eq!( svc.items .expect("No items in svc") .into_iter() .filter(|svc| svc.metadata.name == database_host && &svc.spec.svc_type == "LoadBalancer") - .collect::>() - .len(), + .count(), match is_public { true => 1, false => 0, } ), - Err(_) => assert!(false), + Err(_) => panic!(), }; } DatabaseMode::MANAGED => { - match get_svc(context, provider_kind.clone(), environment.clone(), secrets.clone()) { + match get_svc(context, provider_kind.clone(), environment, secrets) { Ok(svc) => { let service = svc .items @@ -1970,7 +1962,7 @@ pub fn test_db_on_upgrade( false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")), } } - Err(_) => assert!(false), + Err(_) => panic!(), }; } } @@ -2014,5 +2006,5 @@ pub fn test_db_on_upgrade( let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); assert!(matches!(ret, 
TransactionResult::Ok)); - return test_name.to_string(); + test_name.to_string() } diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 3d9a875d..8f55edcc 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -22,8 +22,7 @@ use qovery_engine::models::digital_ocean::DoRegion; pub const DO_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const DO_KUBERNETES_MINOR_VERSION: u8 = 20; -pub const DO_KUBERNETES_VERSION: &'static str = - formatcp!("{}.{}", DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION); +pub const DO_KUBERNETES_VERSION: &str = formatcp!("{}.{}", DO_KUBERNETES_MAJOR_VERSION, DO_KUBERNETES_MINOR_VERSION); pub const DOCR_ID: &str = "registry-the-one-and-unique"; pub const DO_TEST_REGION: DoRegion = DoRegion::Amsterdam3; pub const DO_MANAGED_DATABASE_INSTANCE_TYPE: &str = ""; @@ -45,7 +44,7 @@ pub fn container_registry_digital_ocean(context: &Context) -> DOCR { pub fn do_default_engine_config(context: &Context, logger: Box) -> EngineConfig { DO::docker_cr_engine( - &context, + context, logger, DO_TEST_REGION.to_string().as_str(), KubernetesKind::Doks, @@ -104,7 +103,7 @@ impl Cluster for DO { .expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"); Box::new(DO::new( context.clone(), - cluster_id.clone().as_str(), + cluster_id.as_str(), secrets .DIGITAL_OCEAN_TEST_ORGANIZATION_ID .expect("DIGITAL_OCEAN_KUBE_TEST_ORGANIZATION_ID is not set") diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index 932c9ed9..8d2807b1 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -28,8 +28,7 @@ use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecret pub const SCW_TEST_ZONE: ScwZone = ScwZone::Paris2; pub const SCW_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const SCW_KUBERNETES_MINOR_VERSION: u8 = 19; -pub const SCW_KUBERNETES_VERSION: &'static str = - formatcp!("{}.{}", SCW_KUBERNETES_MAJOR_VERSION, 
SCW_KUBERNETES_MINOR_VERSION); +pub const SCW_KUBERNETES_VERSION: &str = formatcp!("{}.{}", SCW_KUBERNETES_MAJOR_VERSION, SCW_KUBERNETES_MINOR_VERSION); pub const SCW_MANAGED_DATABASE_INSTANCE_TYPE: &str = "db-dev-s"; pub const SCW_MANAGED_DATABASE_DISK_TYPE: &str = "bssd"; pub const SCW_SELF_HOSTED_DATABASE_INSTANCE_TYPE: &str = ""; @@ -55,8 +54,8 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { ScalewayCR::new( context.clone(), - format!("default-registry-qovery-test-{}", random_id.clone()).as_str(), - format!("default-registry-qovery-test-{}", random_id.clone()).as_str(), + format!("default-registry-qovery-test-{}", random_id).as_str(), + format!("default-registry-qovery-test-{}", random_id).as_str(), scw_secret_key.as_str(), scw_default_project_id.as_str(), SCW_TEST_ZONE, @@ -67,7 +66,7 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR { pub fn scw_default_engine_config(context: &Context, logger: Box) -> EngineConfig { Scaleway::docker_cr_engine( - &context, + context, logger, SCW_TEST_ZONE.to_string().as_str(), KubernetesKind::ScwKapsule, @@ -102,7 +101,7 @@ impl Cluster for Scaleway { let cluster = get_environment_test_kubernetes( context, cloud_provider.clone(), - kubernetes_kind.clone(), + kubernetes_kind, kubernetes_version.as_str(), dns_provider.clone(), logger.clone(), @@ -211,7 +210,7 @@ pub fn scw_object_storage(context: Context, region: ScwZone) -> ScalewayOS { ScalewayOS::new( context, - format!("qovery-test-object-storage-{}", random_id.clone()), + format!("qovery-test-object-storage-{}", random_id), format!("Qovery Test Object-Storage {}", random_id), secrets .SCALEWAY_ACCESS_KEY @@ -251,7 +250,7 @@ pub fn clean_environments( for build in env .applications .iter() - .map(|a| a.to_build(®istry_url)) + .map(|a| a.to_build(registry_url)) .collect::>() { let _ = container_registry_client.delete_image(&build.image); diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index 1d16b621..e3f99154 
100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -61,18 +61,14 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { let organization_id = organization_id.to_string(); let cluster_id = cluster_id.to_string(); let execution_id = execution_id(); - let home_dir = std::env::var("WORKSPACE_ROOT_DIR").unwrap_or(home_dir().unwrap().to_str().unwrap().to_string()); + let home_dir = + std::env::var("WORKSPACE_ROOT_DIR").unwrap_or_else(|_| home_dir().unwrap().to_str().unwrap().to_string()); let lib_root_dir = std::env::var("LIB_ROOT_DIR").expect("LIB_ROOT_DIR is mandatory"); let docker_host = std::env::var("DOCKER_HOST").map(|x| Url::parse(&x).unwrap()).ok(); let docker = Docker::new(docker_host.clone()).expect("Can't init docker"); let metadata = Metadata { - dry_run_deploy: Option::from({ - match env::var_os("dry_run_deploy") { - Some(_) => true, - None => false, - } - }), + dry_run_deploy: Option::from({ env::var_os("dry_run_deploy").is_some() }), resource_expiration_in_seconds: { // set a custom ttl as environment variable for manual tests match env::var_os("ttl") { @@ -83,12 +79,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { None => Some(7200), } }, - forced_upgrade: Option::from({ - match env::var_os("forced_upgrade") { - Some(_) => true, - None => false, - } - }), + forced_upgrade: Option::from({ env::var_os("forced_upgrade").is_some() }), disable_pleco: Some(true), }; @@ -108,7 +99,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { ) } -pub fn logger<'a>() -> Box { +pub fn logger() -> Box { Box::new(StdIoLogger::new()) } @@ -164,6 +155,12 @@ struct VaultConfig { token: String, } +impl Default for FuncTestsSecrets { + fn default() -> Self { + Self::new() + } +} + impl FuncTestsSecrets { pub fn new() -> Self { Self::get_all_secrets() @@ -175,7 +172,7 @@ impl FuncTestsSecrets { None => { return Err(Error::new( ErrorKind::NotFound, - format!("VAULT_ADDR environment 
variable is missing"), + "VAULT_ADDR environment variable is missing".to_string(), )) } }; @@ -185,7 +182,7 @@ impl FuncTestsSecrets { None => { return Err(Error::new( ErrorKind::NotFound, - format!("VAULT_TOKEN environment variable is missing"), + "VAULT_TOKEN environment variable is missing".to_string(), )) } }; @@ -395,7 +392,7 @@ pub fn teardown(start_time: Instant, test_name: String) { info!("{} seconds for test {}", elapsed.as_seconds_f64(), test_name); } -pub fn engine_run_test(test: T) -> () +pub fn engine_run_test(test: T) where T: FnOnce() -> String, { @@ -442,10 +439,7 @@ pub fn generate_password(provider_kind: Kind, db_mode: DatabaseMode) -> String { .exclude_similar_characters(true) .strict(true); - let mut password = pg - .generate_one() - .expect("error while trying to generate a password") - .to_string(); + let mut password = pg.generate_one().expect("error while trying to generate a password"); if allow_using_symbols { for forbidden_char in forbidden_chars { @@ -464,7 +458,7 @@ pub fn check_all_connections(env: &EnvironmentRequest) -> Vec { checking.push(curl_path(path_to_test.as_str())); } - return checking; + checking } fn curl_path(path: &str) -> bool { @@ -472,11 +466,11 @@ fn curl_path(path: &str) -> bool { easy.url(path).unwrap(); let res = easy.perform(); match res { - Ok(_) => return true, + Ok(_) => true, Err(e) => { println!("TEST Error : while trying to call {}", e); - return false; + false } } } @@ -497,7 +491,7 @@ pub fn kubernetes_config_path( kubernetes_config_bucket_name, kubernetes_config_object_key, kubernetes_config_file_path.clone(), - secrets.clone(), + secrets, )?; Ok(kubernetes_config_file_path) @@ -515,9 +509,8 @@ where P: AsRef, { // return the file if it already exists and should use cache - let _ = match fs::File::open(file_path.as_ref()) { - Ok(f) => return Ok(f), - Err(_) => {} + let _ = if let Ok(f) = fs::File::open(file_path.as_ref()) { + return Ok(f); }; let file_content_result = 
retry::retry(Fibonacci::from_millis(3000).take(5), || { @@ -537,7 +530,7 @@ where let cluster_name = format!("qovery-{}", context.cluster_id()); let kubeconfig = match get_do_kubeconfig_by_cluster_name( secrets.clone().DIGITAL_OCEAN_TOKEN.unwrap().as_str(), - cluster_name.clone().as_str(), + cluster_name.as_str(), ) { Ok(kubeconfig) => kubeconfig, Err(e) => return OperationResult::Retry(e), @@ -559,7 +552,7 @@ where let configuration = scaleway_api_rs::apis::configuration::Configuration { api_key: Some(scaleway_api_rs::apis::configuration::ApiKey { - key: secret_access_key.to_string(), + key: secret_access_key, prefix: None, }), ..scaleway_api_rs::apis::configuration::Configuration::default() @@ -729,8 +722,8 @@ fn aws_s3_get_object( let mut cmd = QoveryCommand::new( "aws", - &vec!["s3", "cp", &s3_url, &local_path], - &vec![ + &["s3", "cp", &s3_url, &local_path], + &[ (AWS_ACCESS_KEY_ID, access_key_id), (AWS_SECRET_ACCESS_KEY, secret_access_key), ], @@ -751,7 +744,7 @@ pub fn is_pod_restarted_env( pod_to_check: &str, secrets: FuncTestsSecrets, ) -> (bool, String) { - let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); + let namespace_name = format!("{}-{}", &environment_check.project_id, &environment_check.id,); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -759,19 +752,19 @@ pub fn is_pod_restarted_env( Ok(path) => { let restarted_database = cmd::kubectl::kubectl_exec_get_number_of_restart( path.as_str(), - namespace_name.clone().as_str(), + namespace_name.as_str(), pod_to_check, - get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()), + get_cloud_provider_credentials(provider_kind, &secrets), ); match restarted_database { Ok(count) => match count.trim().eq("0") { - true => return (true, "0".to_string()), - false => return (true, count.to_string()), + true => (true, "0".to_string()), + false => (true, count.to_string()), }, - _ 
=> return (false, "".to_string()), + _ => (false, "".to_string()), } } - Err(_e) => return (false, "".to_string()), + Err(_e) => (false, "".to_string()), } } @@ -782,24 +775,24 @@ pub fn get_pods( pod_to_check: &str, secrets: FuncTestsSecrets, ) -> Result, CommandError> { - let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); + let namespace_name = format!("{}-{}", &environment_check.project_id, &environment_check.id,); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); cmd::kubectl::kubectl_exec_get_pods( kubernetes_config.unwrap().as_str(), - Some(namespace_name.clone().as_str()), + Some(namespace_name.as_str()), Some(pod_to_check), - get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()), + get_cloud_provider_credentials(provider_kind, &secrets), ) } pub fn execution_id() -> String { Utc::now() .to_rfc3339() - .replace(":", "-") - .replace(".", "-") - .replace("+", "-") + .replace(':', "-") + .replace('.', "-") + .replace('+', "-") } // avoid test collisions @@ -831,11 +824,11 @@ pub fn generate_cluster_id(region: &str) -> String { shrink_size = current_name.chars().count() } - let mut final_name = format!("{}", ¤t_name[..shrink_size]); + let mut final_name = (¤t_name[..shrink_size]).to_string(); // do not end with a non alphanumeric char while !final_name.chars().last().unwrap().is_alphanumeric() { shrink_size -= 1; - final_name = format!("{}", ¤t_name[..shrink_size]); + final_name = (¤t_name[..shrink_size]).to_string(); } // note ensure you use only lowercase (uppercase are not allowed in lot of AWS resources) format!("{}-{}", final_name.to_lowercase(), region.to_lowercase()) @@ -850,7 +843,7 @@ pub fn get_pvc( environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); + let namespace_name = 
format!("{}-{}", &environment_check.project_id, &environment_check.id,); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -858,8 +851,8 @@ pub fn get_pvc( Ok(path) => { match kubectl_get_pvc( path.as_str(), - namespace_name.clone().as_str(), - get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()), + namespace_name.as_str(), + get_cloud_provider_credentials(provider_kind, &secrets), ) { Ok(pvc) => Ok(pvc), Err(e) => Err(e), @@ -875,7 +868,7 @@ pub fn get_svc( environment_check: EnvironmentRequest, secrets: FuncTestsSecrets, ) -> Result { - let namespace_name = format!("{}-{}", &environment_check.project_id.clone(), &environment_check.id.clone(),); + let namespace_name = format!("{}-{}", &environment_check.project_id, &environment_check.id,); let kubernetes_config = kubernetes_config_path(context, provider_kind.clone(), "/tmp", secrets.clone()); @@ -883,8 +876,8 @@ pub fn get_svc( Ok(path) => { match kubectl_get_svc( path.as_str(), - namespace_name.clone().as_str(), - get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()), + namespace_name.as_str(), + get_cloud_provider_credentials(provider_kind, &secrets), ) { Ok(pvc) => Ok(pvc), Err(e) => Err(e), @@ -931,24 +924,20 @@ pub fn db_infos( let database_db_name = db_id; let database_uri = format!( "mongodb://{}:{}@{}:{}/{}", - database_username, - database_password, - db_fqdn.clone(), - database_port, - database_db_name.clone() + database_username, database_password, db_fqdn, database_port, database_db_name ); DBInfos { - db_port: database_port.clone(), + db_port: database_port, db_name: database_db_name.to_string(), app_commit: "da5dd2b58b78576921373fcb4d4bddc796a804a8".to_string(), app_env_vars: btreemap! 
{ "IS_DOCUMENTDB".to_string() => base64::encode((database_mode == MANAGED).to_string()), - "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string() => base64::encode(db_fqdn.clone()), - "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string() => base64::encode(database_uri.clone()), + "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string() => base64::encode(db_fqdn), + "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string() => base64::encode(database_uri), "QOVERY_DATABASE_TESTING_DATABASE_PORT".to_string() => base64::encode(database_port.to_string()), - "MONGODB_DBNAME".to_string() => base64::encode(database_db_name.clone()), - "QOVERY_DATABASE_TESTING_DATABASE_USERNAME".to_string() => base64::encode(database_username.clone()), - "QOVERY_DATABASE_TESTING_DATABASE_PASSWORD".to_string() => base64::encode(database_password.clone()), + "MONGODB_DBNAME".to_string() => base64::encode(database_db_name), + "QOVERY_DATABASE_TESTING_DATABASE_USERNAME".to_string() => base64::encode(database_username), + "QOVERY_DATABASE_TESTING_DATABASE_PASSWORD".to_string() => base64::encode(database_password), }, } } @@ -956,15 +945,15 @@ pub fn db_infos( let database_port = 3306; let database_db_name = db_id; DBInfos { - db_port: database_port.clone(), + db_port: database_port, db_name: database_db_name.to_string(), app_commit: "42f6553b6be617f954f903e01236e225bbb9f468".to_string(), app_env_vars: btreemap! 
{ - "MYSQL_HOST".to_string() => base64::encode(db_fqdn.clone()), + "MYSQL_HOST".to_string() => base64::encode(db_fqdn), "MYSQL_PORT".to_string() => base64::encode(database_port.to_string()), - "MYSQL_DBNAME".to_string() => base64::encode(database_db_name.clone()), - "MYSQL_USERNAME".to_string() => base64::encode(database_username.clone()), - "MYSQL_PASSWORD".to_string() => base64::encode(database_password.clone()), + "MYSQL_DBNAME".to_string() => base64::encode(database_db_name), + "MYSQL_USERNAME".to_string() => base64::encode(database_username), + "MYSQL_PASSWORD".to_string() => base64::encode(database_password), }, } } @@ -976,15 +965,15 @@ pub fn db_infos( db_id }; DBInfos { - db_port: database_port.clone(), + db_port: database_port, db_name: database_db_name.to_string(), app_commit: "61c7a9b55a085229583b6a394dd168a4159dfd09".to_string(), app_env_vars: btreemap! { - "PG_DBNAME".to_string() => base64::encode(database_db_name.clone()), - "PG_HOST".to_string() => base64::encode(db_fqdn.clone()), + "PG_DBNAME".to_string() => base64::encode(database_db_name), + "PG_HOST".to_string() => base64::encode(db_fqdn), "PG_PORT".to_string() => base64::encode(database_port.to_string()), - "PG_USERNAME".to_string() => base64::encode(database_username.clone()), - "PG_PASSWORD".to_string() => base64::encode(database_password.clone()), + "PG_USERNAME".to_string() => base64::encode(database_username), + "PG_PASSWORD".to_string() => base64::encode(database_password), }, } } @@ -992,15 +981,15 @@ pub fn db_infos( let database_port = 6379; let database_db_name = db_id; DBInfos { - db_port: database_port.clone(), - db_name: database_db_name.to_string(), + db_port: database_port, + db_name: database_db_name, app_commit: "e4b1162741ce162b834b68498e43bf60f0f58cbe".to_string(), app_env_vars: btreemap! 
{ "IS_ELASTICCACHE".to_string() => base64::encode((database_mode == MANAGED).to_string()), - "REDIS_HOST".to_string() => base64::encode(db_fqdn.clone()), + "REDIS_HOST".to_string() => base64::encode(db_fqdn), "REDIS_PORT".to_string() => base64::encode(database_port.to_string()), - "REDIS_USERNAME".to_string() => base64::encode(database_username.clone()), - "REDIS_PASSWORD".to_string() => base64::encode(database_password.clone()), + "REDIS_USERNAME".to_string() => base64::encode(database_username), + "REDIS_PASSWORD".to_string() => base64::encode(database_password), }, } } From e891576f795a59b701ec05f1222b66af16bcc4aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Fri, 6 May 2022 16:30:19 +0200 Subject: [PATCH 115/122] Pin rust version in toolchain to match gitlab CI --- .github/workflows/tests.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index df94bc3d..51710617 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,7 +17,9 @@ jobs: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: - toolchain: stable + toolchain: 1.60.0 + components: rustfmt, clippy + - uses: hashicorp/setup-terraform@v1 with: terraform_version: 0.14.10 @@ -30,7 +32,6 @@ jobs: run: | echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) - rustup component add clippy cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache From 63fbf385c010eff78f36de095e300ef45098512e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Fri, 6 May 2022 16:45:57 +0200 Subject: [PATCH 116/122] Fix linter --- test_utilities/src/utilities.rs | 4 ++-- tests/helm/cert_manager.rs | 30 
+++++++++++++++--------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index e3f99154..d94110b8 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -68,7 +68,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { let docker = Docker::new(docker_host.clone()).expect("Can't init docker"); let metadata = Metadata { - dry_run_deploy: Option::from({ env::var_os("dry_run_deploy").is_some() }), + dry_run_deploy: Option::from(env::var_os("dry_run_deploy").is_some()), resource_expiration_in_seconds: { // set a custom ttl as environment variable for manual tests match env::var_os("ttl") { @@ -79,7 +79,7 @@ pub fn context(organization_id: &str, cluster_id: &str) -> Context { None => Some(7200), } }, - forced_upgrade: Option::from({ env::var_os("forced_upgrade").is_some() }), + forced_upgrade: Option::from(env::var_os("forced_upgrade").is_some()), disable_pleco: Some(true), }; diff --git a/tests/helm/cert_manager.rs b/tests/helm/cert_manager.rs index f356bc21..454a7b3c 100644 --- a/tests/helm/cert_manager.rs +++ b/tests/helm/cert_manager.rs @@ -1,21 +1,21 @@ -use base64::decode; + use qovery_engine::cloud_provider::helm::{ - deploy_charts_levels, ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces, + ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces, }; use qovery_engine::cmd::helm::Helm; -use qovery_engine::cmd::kubectl::{kubectl_exec_delete_namespace, kubectl_exec_get_secrets, kubectl_get_resource_yaml}; -use qovery_engine::cmd::structs::SecretItem; -use qovery_engine::fs::list_yaml_backup_files; + + + use serde_derive::Deserialize; use serde_derive::Serialize; -use std::fs; -use std::fs::OpenOptions; -use std::io::{BufRead, BufReader}; -use std::path::{Path, PathBuf}; -use std::str::from_utf8; -use std::thread::sleep; -use std::time::Duration; -use tempdir::TempDir; + + + +use 
std::path::{PathBuf}; + + + + use test_utilities::utilities::FuncTestsSecrets; #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -143,7 +143,7 @@ fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) { }, ChartSetValue { key: "provider.cloudflare.apiToken".to_string(), - value: vault_secrets.CLOUDFLARE_TOKEN.unwrap().to_string(), + value: vault_secrets.CLOUDFLARE_TOKEN.unwrap(), }, ChartSetValue { key: "provider.cloudflare.email".to_string(), @@ -151,7 +151,7 @@ fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) { }, ChartSetValue { key: "acme.letsEncrypt.emailReport".to_string(), - value: vault_secrets.CLOUDFLARE_ID.unwrap().to_string(), + value: vault_secrets.CLOUDFLARE_ID.unwrap(), }, ChartSetValue { key: "acme.letsEncrypt.acmeUrl".to_string(), From 1b1df313da01adbd979e0cc688df2461b1783c67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Fri, 6 May 2022 16:48:08 +0200 Subject: [PATCH 117/122] fmt --- tests/helm/cert_manager.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/tests/helm/cert_manager.rs b/tests/helm/cert_manager.rs index 454a7b3c..11a8f048 100644 --- a/tests/helm/cert_manager.rs +++ b/tests/helm/cert_manager.rs @@ -1,20 +1,10 @@ - -use qovery_engine::cloud_provider::helm::{ - ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces, -}; +use qovery_engine::cloud_provider::helm::{ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces}; use qovery_engine::cmd::helm::Helm; - - use serde_derive::Deserialize; use serde_derive::Serialize; - - -use std::path::{PathBuf}; - - - +use std::path::PathBuf; use test_utilities::utilities::FuncTestsSecrets; From 73e1d47e4eb8cad583291e350a877198bea2397f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Fri, 6 May 2022 18:18:38 +0200 Subject: [PATCH 118/122] Bump deps --- Cargo.toml | 78 +++++++++++++++--------------- 
src/build_platform/local_docker.rs | 6 +-- test_utilities/Cargo.toml | 40 +++++++-------- test_utilities/src/utilities.rs | 3 +- 4 files changed, 64 insertions(+), 63 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1d074574..191acbf7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,78 +8,78 @@ edition = "2018" [dependencies] chrono = "0.4.19" -cmd_lib = "1.0.13" +cmd_lib = "1.3.0" derivative = "2.2.0" -git2 = "0.14.2" +git2 = "0.14.3" walkdir = "2.3.2" -itertools = "0.10.0" +itertools = "0.10.3" base64 = "0.13.0" -dirs = "3.0.2" +dirs = "4.0.0" rust-crypto = "0.2.36" -retry = "1.2.1" -trust-dns-resolver = "0.20.3" -rand = "0.8.3" -semver = "1.0.4" -gethostname = "0.2.1" -reqwest = { version = "0.11.3", features = ["blocking", "json"] } -futures = "0.3.15" +retry = "1.3.1" +trust-dns-resolver = "0.21.2" +rand = "0.8.5" +semver = "1.0.9" +gethostname = "0.2.3" +reqwest = { version = "0.11.10", features = ["blocking", "json"] } +futures = "0.3.21" timeout-readwrite = "0.3.1" lazy_static = "1.4.0" -uuid = { version = "0.8", features = ["v4", "serde"] } +uuid = { version = "1.0.0", features = ["v4", "serde"] } url = "2.2.2" -function_name = "0.2.0" -thiserror = "1.0.30" -strum = "0.23" -strum_macros = "0.23" +function_name = "0.2.1" +thiserror = "1.0.31" +strum = "0.24.0" +strum_macros = "0.24.0" urlencoding = "2.1.0" # FIXME use https://crates.io/crates/blocking instead of runtime.rs # tar gz -flate2 = "1.0.20" # tar gz -tar = ">=0.4.36" +flate2 = "1.0.23" +tar = "0.4.38" # logger -tracing = "0.1.26" -tracing-subscriber = "0.2.18" +tracing = "0.1.34" +tracing-subscriber = "0.3.11" # Docker deps # shiplift = "0.6.0" # Filesystem -sysinfo = "0.18.2" +sysinfo = "0.23.11" # Jinja2 -tera = "1.10.0" +tera = "1.15.0" # Json -serde = "1.0.126" -serde_json = "1.0.64" -serde_derive = "1.0.126" -serde_yaml = "0.8.23" +serde = "1.0.137" +serde_json = "1.0.81" +serde_derive = "1.0.137" +serde_yaml = "0.8.24" # AWS deps -tokio = { version = "1.10.0", features = ["full"] } 
-rusoto_core = "0.47.0" -rusoto_sts = "0.47.0" -rusoto_credential = "0.47.0" -rusoto_ecr = "0.47.0" -rusoto_eks = "0.47.0" -rusoto_s3 = "0.47.0" -rusoto_dynamodb = "0.47.0" -rusoto_iam = "0.47.0" +tokio = { version = "1.18.1", features = ["full"] } +rusoto_core = "0.48.0" +rusoto_sts = "0.48.0" +rusoto_credential = "0.48.0" +rusoto_ecr = "0.48.0" +rusoto_eks = "0.48.0" +rusoto_s3 = "0.48.0" +rusoto_dynamodb = "0.48.0" +rusoto_iam = "0.48.0" # Digital Ocean Deps digitalocean = "0.1.1" # Scaleway Deps -scaleway_api_rs = "=0.1.2" +scaleway_api_rs = "0.1.2" [dev-dependencies] test-utilities = { path = "test_utilities" } -tempdir = "0.3" -tempfile = "3.2.0" +tempdir = "0.3.7" +tempfile = "3.3.0" maplit = "1.0.2" -tracing-test = "0.1.0" +tracing-test = "0.2.1" [features] default = [] diff --git a/src/build_platform/local_docker.rs b/src/build_platform/local_docker.rs index bab2eff3..74204d98 100644 --- a/src/build_platform/local_docker.rs +++ b/src/build_platform/local_docker.rs @@ -78,14 +78,14 @@ impl LocalDocker { let mut disk_free_space_percent: u64 = 100; let sys_info = sysinfo::System::new_with_specifics(RefreshKind::new().with_disks().with_disks_list()); - let should_reclaim_space = sys_info.get_disks().iter().any(|disk| { + let should_reclaim_space = sys_info.disks().iter().any(|disk| { // Check disk own the mount point we are interested in - if !mount_points_to_check.contains(&disk.get_mount_point()) { + if !mount_points_to_check.contains(&disk.mount_point()) { return false; } // Check if we have hit our threshold regarding remaining disk space - disk_free_space_percent = disk.get_available_space() * 100 / disk.get_total_space(); + disk_free_space_percent = disk.available_space() * 100 / disk.total_space(); if disk_free_space_percent <= DISK_FREE_SPACE_PERCENTAGE_BEFORE_PURGE { return true; } diff --git a/test_utilities/Cargo.toml b/test_utilities/Cargo.toml index d74d5875..b05ea8fb 100644 --- a/test_utilities/Cargo.toml +++ b/test_utilities/Cargo.toml @@ -8,31 
+8,31 @@ edition = "2018" [dependencies] base64 = "0.13.0" -bstr = "0.2.16" +bstr = "0.2.17" qovery-engine = { path = "../" } -chrono = "0.4.11" -dirs = "3.0.1" -gethostname = "0.2.1" -passwords = "3.1.7" -rand = "0.7.3" -serde = "1.0" -serde_json = "1.0.57" -serde_derive = "1.0" -curl = "0.4.34" -reqwest = { version = "0.11.3", features = ["blocking", "json"] } -tracing = "0.1" -tracing-subscriber = "0.2" -retry = "1.0.0" -time = "0.2.23" -hashicorp_vault = "2.0.1" +chrono = "0.4.19" +dirs = "4.0.0" +gethostname = "0.2.3" +passwords = "3.1.9" +rand = "0.8.5" +serde = "1.0.137" +serde_json = "1.0.81" +serde_derive = "1.0.137" +curl = "0.4.43" +reqwest = { version = "0.11.10", features = ["blocking", "json"] } +tracing = "0.1.34" +tracing-subscriber = { version = "0.3.11", features = ["json"] } +retry = "1.3.1" +time = "0.3.9" +hashicorp_vault = "2.1.0" maplit = "1.0.2" -uuid = { version = "0.8", features = ["v4"] } -const_format = "0.2.22" +uuid = { version = "1.0.0", features = ["v4"] } +const_format = "0.2.23" url = "2.2.2" -tokio = { version = "1.10.0", features = ["full"] } +tokio = { version = "1.18.1", features = ["full"] } # Digital Ocean Deps digitalocean = "0.1.1" # Scaleway Deps -scaleway_api_rs = "=0.1.2" +scaleway_api_rs = "0.1.2" diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index d94110b8..c5847196 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -408,7 +408,8 @@ pub fn generate_id() -> String { let uuid; loop { - let rand_string: String = thread_rng().sample_iter(Alphanumeric).take(15).collect(); + let rand_string: Vec = thread_rng().sample_iter(Alphanumeric).take(15).collect(); + let rand_string = String::from_utf8(rand_string).unwrap(); if rand_string.chars().next().unwrap().is_alphabetic() { uuid = rand_string.to_lowercase(); break; From 19e67214a6335286f18176c3391c6b94d0ad2ba8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=A3rebe=20-=20Romain=20GERARD?= Date: Mon, 9 May 2022 
10:19:29 +0200 Subject: [PATCH 119/122] Bump lock --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 51710617..65451efd 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -32,7 +32,7 @@ jobs: run: | echo "########## LINTER ##########" cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1) - cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) + cargo clippy --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1) export PATH=$GITHUB_WORKSPACE/bin:$PATH export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache export SCCACHE_REDIS=${{ secrets.SCCACHE_REDIS }} From 6635bb5557e1fcd2555f51096e0a4e5c734aa0bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Mon, 9 May 2022 12:30:53 +0200 Subject: [PATCH 120/122] Agents use jwt_token instead of static secret (#709) --- src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs | 2 +- src/cloud_provider/aws/kubernetes/eks_helm_charts.rs | 4 ++-- src/cloud_provider/aws/kubernetes/mod.rs | 1 + src/cloud_provider/digitalocean/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/digitalocean/kubernetes/mod.rs | 1 + src/cloud_provider/helm.rs | 8 ++++---- src/cloud_provider/scaleway/kubernetes/helm_charts.rs | 2 +- src/cloud_provider/scaleway/kubernetes/mod.rs | 3 +++ test_utilities/src/aws.rs | 1 + test_utilities/src/digitalocean.rs | 1 + test_utilities/src/scaleway.rs | 3 +++ test_utilities/src/utilities.rs | 3 +++ 12 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs index 2756894c..1dc262e3 100644 --- a/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs +++ 
b/src/cloud_provider/aws/kubernetes/ec2_helm_charts.rs @@ -335,7 +335,7 @@ pub fn ec2_aws_helm_charts( organization_long_id: &chart_config_prerequisites.organization_long_id, cluster_id: &chart_config_prerequisites.cluster_id, cluster_long_id: &chart_config_prerequisites.cluster_long_id, - cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token, grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, }; let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; diff --git a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs index 77e73e83..ad3e0a1c 100644 --- a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs @@ -990,7 +990,7 @@ datasources: organization_long_id: &chart_config_prerequisites.organization_long_id, cluster_id: &chart_config_prerequisites.cluster_id, cluster_long_id: &chart_config_prerequisites.cluster_long_id, - cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token, grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, }; let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; @@ -1001,7 +1001,7 @@ datasources: organization_long_id: &chart_config_prerequisites.organization_long_id, cluster_id: &chart_config_prerequisites.cluster_id, cluster_long_id: &chart_config_prerequisites.cluster_long_id, - cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + cluster_token: &chart_config_prerequisites.infra_options.jwt_token, grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, }; let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?; diff --git a/src/cloud_provider/aws/kubernetes/mod.rs 
b/src/cloud_provider/aws/kubernetes/mod.rs index 89c1ab06..31bf1e66 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -98,6 +98,7 @@ pub struct Options { pub qovery_api_url: String, pub qovery_grpc_url: String, pub qovery_cluster_secret_token: String, + pub jwt_token: String, pub qovery_engine_location: EngineLocation, pub engine_version_controller_token: String, pub agent_version_controller_token: String, diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index 3e9f733e..4ae89453 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -801,7 +801,7 @@ datasources: organization_long_id: &chart_config_prerequisites.organization_long_id, cluster_id: &chart_config_prerequisites.cluster_id, cluster_long_id: &chart_config_prerequisites.cluster_long_id, - cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + cluster_jwt_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, }; let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; diff --git a/src/cloud_provider/digitalocean/kubernetes/mod.rs b/src/cloud_provider/digitalocean/kubernetes/mod.rs index 2f15f74c..8e531c83 100644 --- a/src/cloud_provider/digitalocean/kubernetes/mod.rs +++ b/src/cloud_provider/digitalocean/kubernetes/mod.rs @@ -76,6 +76,7 @@ pub struct DoksOptions { pub qovery_api_url: String, pub qovery_grpc_url: String, pub qovery_cluster_secret_token: String, + pub jwt_token: String, pub qovery_engine_location: EngineLocation, pub engine_version_controller_token: String, pub agent_version_controller_token: String, diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 5f8e1702..a01122d6 100644 --- 
a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -758,7 +758,7 @@ pub fn get_chart_for_shell_agent( value: context.grpc_url.to_string(), }, ChartSetValue { - key: "environmentVariables.CLUSTER_TOKEN".to_string(), + key: "environmentVariables.CLUSTER_JWT_TOKEN".to_string(), value: context.cluster_token.to_string(), }, ChartSetValue { @@ -800,7 +800,7 @@ pub struct ClusterAgentContext<'a> { pub organization_long_id: &'a Uuid, pub cluster_id: &'a str, pub cluster_long_id: &'a Uuid, - pub cluster_token: &'a str, + pub cluster_jwt_token: &'a str, pub grpc_url: &'a str, } @@ -842,8 +842,8 @@ pub fn get_chart_for_cluster_agent( value: context.grpc_url.to_string(), }, ChartSetValue { - key: "environmentVariables.CLUSTER_TOKEN".to_string(), - value: context.cluster_token.to_string(), + key: "environmentVariables.CLUSTER_JWT_TOKEN".to_string(), + value: context.cluster_jwt_token.to_string(), }, ChartSetValue { key: "environmentVariables.CLUSTER_ID".to_string(), diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index c35e17b4..afba9298 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs +++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -674,7 +674,7 @@ datasources: organization_long_id: &chart_config_prerequisites.organization_long_id, cluster_id: &chart_config_prerequisites.cluster_id, cluster_long_id: &chart_config_prerequisites.cluster_long_id, - cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, + cluster_jwt_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token, grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url, }; let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?; diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs index 78fd8bde..a82d9d23 100644 --- 
a/src/cloud_provider/scaleway/kubernetes/mod.rs +++ b/src/cloud_provider/scaleway/kubernetes/mod.rs @@ -63,6 +63,7 @@ pub struct KapsuleOptions { pub qovery_api_url: String, pub qovery_grpc_url: String, pub qovery_cluster_secret_token: String, + pub jwt_token: String, pub qovery_nats_url: String, pub qovery_nats_user: String, pub qovery_nats_password: String, @@ -89,6 +90,7 @@ impl KapsuleOptions { qovery_api_url: String, qovery_grpc_url: String, qovery_cluster_secret_token: String, + qoverry_cluster_jwt_token: String, qovery_nats_url: String, qovery_nats_user: String, qovery_nats_password: String, @@ -107,6 +109,7 @@ impl KapsuleOptions { qovery_api_url, qovery_grpc_url, qovery_cluster_secret_token, + jwt_token: qoverry_cluster_jwt_token, qovery_nats_url, qovery_nats_user, qovery_nats_password, diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 185b7ad5..2a821c02 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -248,6 +248,7 @@ impl Cluster for AWS { tls_email_report: secrets.LETS_ENCRYPT_EMAIL_REPORT.unwrap(), qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(), qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(), + jwt_token: secrets.QOVERY_CLUSTER_JWT_TOKEN.unwrap(), } } } diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 8f55edcc..ce24f110 100644 --- a/test_utilities/src/digitalocean.rs +++ b/test_utilities/src/digitalocean.rs @@ -149,6 +149,7 @@ impl Cluster for DO { qovery_api_url: secrets.QOVERY_API_URL.unwrap(), qovery_grpc_url: secrets.QOVERY_GRPC_URL.unwrap(), qovery_cluster_secret_token: secrets.QOVERY_CLUSTER_SECRET_TOKEN.unwrap(), + jwt_token: "".to_string(), qovery_engine_location: EngineLocation::ClientSide, engine_version_controller_token: secrets.QOVERY_ENGINE_CONTROLLER_TOKEN.unwrap(), agent_version_controller_token: secrets.QOVERY_AGENT_CONTROLLER_TOKEN.unwrap(), diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs 
index 8d2807b1..92dfb4b4 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -171,6 +171,9 @@ impl Cluster for Scaleway { secrets .QOVERY_CLUSTER_SECRET_TOKEN .expect("QOVERY_CLUSTER_SECRET_TOKEN is not set in secrets"), + secrets + .QOVERY_CLUSTER_JWT_TOKEN + .expect("QOVERY_CLUSTER_JWT_TOKEN is not set in secrets"), secrets.QOVERY_NATS_URL.expect("QOVERY_NATS_URL is not set in secrets"), secrets .QOVERY_NATS_USERNAME diff --git a/test_utilities/src/utilities.rs b/test_utilities/src/utilities.rs index c5847196..91858a5f 100644 --- a/test_utilities/src/utilities.rs +++ b/test_utilities/src/utilities.rs @@ -148,6 +148,7 @@ pub struct FuncTestsSecrets { pub TERRAFORM_AWS_REGION: Option, pub QOVERY_GRPC_URL: Option, pub QOVERY_CLUSTER_SECRET_TOKEN: Option, + pub QOVERY_CLUSTER_JWT_TOKEN: Option, } struct VaultConfig { @@ -238,6 +239,7 @@ impl FuncTestsSecrets { TERRAFORM_AWS_REGION: None, QOVERY_GRPC_URL: None, QOVERY_CLUSTER_SECRET_TOKEN: None, + QOVERY_CLUSTER_JWT_TOKEN: None, }; let vault_config = match Self::get_vault_config() { @@ -358,6 +360,7 @@ impl FuncTestsSecrets { "QOVERY_CLUSTER_SECRET_TOKEN", secrets.QOVERY_CLUSTER_SECRET_TOKEN, ), + QOVERY_CLUSTER_JWT_TOKEN: Self::select_secret("QOVERY_CLUSTER_JWT_TOKEN", secrets.QOVERY_CLUSTER_JWT_TOKEN), } } } From 8105b0f620c4486c01eb041f2968a07f44b9fcd9 Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Mon, 9 May 2022 17:00:20 +0200 Subject: [PATCH 121/122] fix: make aws ec2 option inputs optional (#712) --- src/cloud_provider/aws/kubernetes/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 31bf1e66..f1e76001 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -65,8 +65,11 @@ impl fmt::Display for VpcQoveryNetworkMode { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Options { // AWS related + #[serde(default)] // TODO: 
remove default pub ec2_zone_a_subnet_blocks: Vec, + #[serde(default)] // TODO: remove default pub ec2_zone_b_subnet_blocks: Vec, + #[serde(default)] // TODO: remove default pub ec2_zone_c_subnet_blocks: Vec, pub eks_zone_a_subnet_blocks: Vec, pub eks_zone_b_subnet_blocks: Vec, @@ -86,9 +89,11 @@ pub struct Options { pub vpc_qovery_network_mode: VpcQoveryNetworkMode, pub vpc_cidr_block: String, pub eks_cidr_subnet: String, + #[serde(default)] // TODO: remove default pub ec2_cidr_subnet: String, pub vpc_custom_routing_table: Vec, pub eks_access_cidr_blocks: Vec, + #[serde(default)] // TODO: remove default pub ec2_access_cidr_blocks: Vec, pub rds_cidr_subnet: String, pub documentdb_cidr_subnet: String, From 0c8e62cf52e8893361e10cc9f80523b1eba72fba Mon Sep 17 00:00:00 2001 From: BenjaminCh Date: Tue, 10 May 2022 10:59:33 +0200 Subject: [PATCH 122/122] Fix: typo t2x.large to t2.xlarge (#713) --- src/cloud_provider/aws/kubernetes/node.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cloud_provider/aws/kubernetes/node.rs b/src/cloud_provider/aws/kubernetes/node.rs index 2a46a27d..e4f7ba2d 100644 --- a/src/cloud_provider/aws/kubernetes/node.rs +++ b/src/cloud_provider/aws/kubernetes/node.rs @@ -19,7 +19,7 @@ impl InstanceType for AwsInstancesType { fn to_cloud_provider_format(&self) -> String { match self { AwsInstancesType::T2Large => "t2.large", - AwsInstancesType::T2Xlarge => "t2x.large", + AwsInstancesType::T2Xlarge => "t2.xlarge", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", AwsInstancesType::T3aMedium => "t3a.medium", @@ -34,7 +34,7 @@ impl AwsInstancesType { pub fn as_str(&self) -> &str { match self { AwsInstancesType::T2Large => "t2.large", - AwsInstancesType::T2Xlarge => "t2x.large", + AwsInstancesType::T2Xlarge => "t2.xlarge", AwsInstancesType::T3Large => "t3.large", AwsInstancesType::T3Xlarge => "t3.xlarge", AwsInstancesType::T3aMedium => "t3a.medium", @@ -48,7 +48,7 @@ impl fmt::Display for 
AwsInstancesType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { AwsInstancesType::T2Large => write!(f, "t2.large"), - AwsInstancesType::T2Xlarge => write!(f, "t2x.large"), + AwsInstancesType::T2Xlarge => write!(f, "t2.xlarge"), AwsInstancesType::T3Large => write!(f, "t3.large"), AwsInstancesType::T3Xlarge => write!(f, "t3.xlarge"), AwsInstancesType::T3aMedium => write!(f, "t3a.medium"), @@ -64,7 +64,7 @@ impl FromStr for AwsInstancesType { fn from_str(s: &str) -> Result { match s { "t2.large" => Ok(AwsInstancesType::T2Large), - "t2x.large" => Ok(AwsInstancesType::T2Xlarge), + "t2.xlarge" => Ok(AwsInstancesType::T2Xlarge), "t3.large" => Ok(AwsInstancesType::T3Large), "t3.xlarge" => Ok(AwsInstancesType::T3Xlarge), "t3a.medium" => Ok(AwsInstancesType::T3aMedium),