From 2341163fb20bfa602e1c376d9c83c75c42e41547 Mon Sep 17 00:00:00 2001 From: Bilel Benamira Date: Mon, 16 May 2022 16:09:20 +0200 Subject: [PATCH] wip --- lib/aws/bootstrap/eks-workers-nodes.j2.tf | 4 +- src/cloud_provider/aws/kubernetes/mod.rs | 148 +++++++++++++++++++++- src/cloud_provider/models.rs | 16 +++ src/errors/io.rs | 2 + src/errors/mod.rs | 30 +++++ test_utilities/src/aws.rs | 14 +- test_utilities/src/common.rs | 110 +++++++++++++++- test_utilities/src/digitalocean.rs | 14 +- test_utilities/src/scaleway.rs | 18 ++- tests/aws/aws_whole_enchilada.rs | 42 ++++++ 10 files changed, 378 insertions(+), 20 deletions(-) diff --git a/lib/aws/bootstrap/eks-workers-nodes.j2.tf b/lib/aws/bootstrap/eks-workers-nodes.j2.tf index 8897c303..754550b2 100644 --- a/lib/aws/bootstrap/eks-workers-nodes.j2.tf +++ b/lib/aws/bootstrap/eks-workers-nodes.j2.tf @@ -18,14 +18,16 @@ resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" { ) scaling_config { - desired_size = "{{ eks_worker_node.min_nodes }}" + desired_size = "{{ eks_worker_desired_nodes }}" max_size = "{{ eks_worker_node.max_nodes }}" min_size = "{{ eks_worker_node.min_nodes }}" } lifecycle { // don't update the desired size and let the cluster-autoscaler do the job + {% if not eks_worker_update_desired_nodes %} ignore_changes = [scaling_config[0].desired_size] + {% endif %} create_before_destroy = true } diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 2b483a6b..e01f5a08 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -1,6 +1,7 @@ use core::fmt; use std::env; use std::path::Path; +use std::str::FromStr; use retry::delay::{Fibonacci, Fixed}; use retry::Error::Operation; @@ -24,7 +25,9 @@ use crate::cloud_provider::utilities::{wait_until_port_is_open, TcpCheckSource}; use crate::cloud_provider::CloudProvider; use crate::cmd; use crate::cmd::helm::{to_engine_error, Helm}; -use 
crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events}; +use crate::cmd::kubectl::{ + kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, kubectl_exec_get_node, +}; use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::dns_provider::DnsProvider; @@ -32,7 +35,11 @@ use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity}; use crate::events::{EngineEvent, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; use crate::io_models::{Context, Features, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString}; use crate::object_storage::s3::S3; +use crate::runtime::block_on; use crate::string::terraform_list_format; +use rusoto_core::credential::StaticProvider; +use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; +use rusoto_eks::{DescribeNodegroupRequest, Eks, EksClient, ListNodegroupsRequest, NodegroupScalingConfig}; pub mod ec2; mod ec2_helm_charts; @@ -380,7 +387,10 @@ fn tera_context( match env::var_os("VAULT_SECRET_ID") { Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), None => kubernetes.logger().log(EngineEvent::Error( - EngineError::new_missing_required_env_variable(event_details, "VAULT_SECRET_ID".to_string()), + EngineError::new_missing_required_env_variable( + event_details.clone(), + "VAULT_SECRET_ID".to_string(), + ), None, )), } @@ -428,6 +438,12 @@ fn tera_context( .as_str(), ); + let (update_desired_node, desired_nodes) = match should_update_desired_nodes(event_details, kubernetes, node_groups) + { + Err(e) => return Err(e), + Ok(value) => value, + }; + context.insert("aws_region", &kubernetes.region()); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); 
context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); @@ -444,6 +460,8 @@ fn tera_context( context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_worker_nodes", &node_groups); + context.insert("eks_worker_desired_nodes", &desired_nodes); + context.insert("eks_worker_update_desired_nodes", &update_desired_node); context.insert("ec2_zone_a_subnet_blocks_private", &ec2_zone_a_subnet_blocks_private); context.insert("ec2_zone_b_subnet_blocks_private", &ec2_zone_b_subnet_blocks_private); context.insert("ec2_zone_c_subnet_blocks_private", &ec2_zone_c_subnet_blocks_private); @@ -497,6 +515,132 @@ fn tera_context( Ok(context) } +/// Returns a tuple of (update_desired_node: bool, desired_nodes_count: i32). +fn should_update_desired_nodes( + event_details: EventDetails, + kubernetes: &dyn Kubernetes, + node_groups: &[NodeGroups], +) -> Result<(bool, i32), EngineError> { + let cloud_provider = kubernetes.cloud_provider(); + let future_node_group = match node_groups.is_empty() { + false => node_groups.first().expect("unable to get node_group"), + true => { + return Err(EngineError::new_cluster_has_no_worker_nodes( + event_details, + Some(CommandError::new_from_safe_message( + "Could not find node_group in terra context".to_string(), + )), + )); + } + }; + + let scaling_config = match get_node_scaling_config(event_details.clone(), kubernetes) { + Ok(value) => match value { + Some(v) => v, + None => return Ok((false, future_node_group.min_nodes)), + }, + Err(error) => return Err(error), + }; + + let should_update_desired_nodes = scaling_config.min_size.unwrap_or_default() + != i64::from(future_node_group.min_nodes) + || scaling_config.max_size.unwrap_or_default() != i64::from(future_node_group.max_nodes); + + let kubeconfig = match kubernetes.get_kubeconfig_file() { + Ok((path, _)) => path, + Err(_) => return Ok((false, 
scaling_config.desired_size.unwrap() as i32)), + }; + let get_node_result = retry::retry(Fixed::from_millis(10000).take(5), || { + match kubectl_exec_get_node(kubeconfig.clone(), cloud_provider.credentials_environment_variables().clone()) { + Err(e) => OperationResult::Retry(e), + Ok(nodes) => OperationResult::Ok(nodes.items.len() as i32), + } + }); + + let actual_nodes_count = match get_node_result { + Ok(value) => value, + Err(Operation { error, .. }) => { + return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, Some(error))); + } + Err(retry::Error::Internal(e)) => { + return Err(EngineError::new_cluster_has_no_worker_nodes( + event_details, + Some(CommandError::new_from_safe_message(e)), + )); + } + }; + + match future_node_group.get_desired_nodes(event_details, actual_nodes_count) { + Ok(desired_nodes) => Ok((should_update_desired_nodes, desired_nodes)), + Err(error) => Err(error), + } +} + +/// Returns a rusoto eks client using the current configuration. +fn get_rusoto_eks_client(event_details: EventDetails, kubernetes: &dyn Kubernetes) -> Result { + let cloud_provider = kubernetes.cloud_provider(); + let region = match RusotoRegion::from_str(&kubernetes.region()) { + Ok(value) => value, + Err(error) => { + return Err(EngineError::new_unsupported_region( + event_details, + "".to_string(), + CommandError::new_from_safe_message(error.to_string()), + )); + } + }; + + let credentials = + StaticProvider::new(cloud_provider.access_key_id(), cloud_provider.secret_access_key(), None, None); + + let client = Client::new_with(credentials, HttpClient::new().expect("unable to create new Http client")); + Ok(EksClient::new_with_client(client, region)) +} + +/// Returns the scaling config of the cluster's first node group, or `None` if node groups could not be listed.
+fn get_node_scaling_config( + event_details: EventDetails, + kubernetes: &dyn Kubernetes, +) -> Result, EngineError> { + let eks_client = match get_rusoto_eks_client(event_details.clone(), kubernetes) { + Ok(value) => value, + Err(error) => return Err(error), + }; + + let node_groups = match block_on(eks_client.list_nodegroups(ListNodegroupsRequest { + cluster_name: kubernetes.cluster_name(), + ..Default::default() + })) { + Ok(res) => res.nodegroups.unwrap_or_default(), + Err(_) => return Ok(Option::None), + }; + + if node_groups.is_empty() { + return Err(EngineError::new_cluster_has_no_worker_nodes( + event_details, + Some(CommandError::new_from_safe_message(format!( + "Could not find node_groups for cluster {}", + kubernetes.cluster_name(), + ))), + )); + } + + let actual_nodes_group = match block_on(eks_client.describe_nodegroup(DescribeNodegroupRequest { + cluster_name: kubernetes.cluster_name(), + nodegroup_name: node_groups.first().unwrap().to_string(), + })) { + Ok(res) => res.nodegroup.unwrap_or_default(), + Err(error) => { + return Err(EngineError::new_cluster_has_no_worker_nodes( + event_details, + Some(CommandError::new_from_safe_message(error.to_string())), + )); + } + }; + + Ok(actual_nodes_group.scaling_config) +} + fn create( kubernetes: &dyn Kubernetes, kubernetes_long_id: uuid::Uuid, diff --git a/src/cloud_provider/models.rs b/src/cloud_provider/models.rs index 110ce67e..c5f7b4ac 100644 --- a/src/cloud_provider/models.rs +++ b/src/cloud_provider/models.rs @@ -1,3 +1,5 @@ +use crate::errors::EngineError; +use crate::events::EventDetails; use serde::{Deserialize, Serialize}; #[derive(Clone, Eq, PartialEq, Hash)] @@ -72,6 +74,20 @@ pub struct NodeGroups { pub disk_size_in_gib: i32, } +impl NodeGroups { + pub fn get_desired_nodes(&self, event_details: EventDetails, actual_nodes_count: i32) -> Result { + if actual_nodes_count > self.max_nodes { + Result::Err(EngineError::new_cannot_deploy_max_nodes_exceeded( + event_details, + actual_nodes_count, + 
self.max_nodes, + )) + } else { + Result::Ok(self.max_nodes) + } + } +} + #[derive(Serialize, Deserialize)] pub struct NodeGroupsFormat { pub name: String, diff --git a/src/errors/io.rs b/src/errors/io.rs index 2989f8c2..864cd605 100644 --- a/src/errors/io.rs +++ b/src/errors/io.rs @@ -30,6 +30,7 @@ pub enum Tag { CannotRetrieveClusterConfigFile, CannotCreateFile, CannotGetClusterNodes, + NotEnoughNodesAvailableToDeployEnvironment, NotEnoughResourcesToDeployEnvironment, CannotUninstallHelmChart, CannotExecuteK8sVersion, @@ -128,6 +129,7 @@ impl From for Tag { errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile, errors::Tag::CannotCreateFile => Tag::CannotCreateFile, errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes, + errors::Tag::NotEnoughNodesAvailableToDeployEnvironment => Tag::NotEnoughNodesAvailableToDeployEnvironment, errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment, errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable, errors::Tag::ClusterHasNoWorkerNodes => Tag::ClusterHasNoWorkerNodes, diff --git a/src/errors/mod.rs b/src/errors/mod.rs index 17c04e51..64b43e19 100644 --- a/src/errors/mod.rs +++ b/src/errors/mod.rs @@ -189,6 +189,8 @@ pub enum Tag { CannotCreateFile, /// CannotGetClusterNodes: represents an error while trying to get cluster's nodes. CannotGetClusterNodes, + /// NotEnoughNodesAvailableToDeployEnvironment: represents an error when trying to deploy an environment but the desired number of nodes exceeds the maximum value. + NotEnoughNodesAvailableToDeployEnvironment, /// NotEnoughResourcesToDeployEnvironment: represents an error when trying to deploy an environment but there are not enough resources available on the cluster. NotEnoughResourcesToDeployEnvironment, /// CannotUninstallHelmChart: represents an error when trying to uninstall an helm chart on the cluster, uninstallation couldn't be proceeded.
@@ -814,6 +816,34 @@ impl EngineError { ) } + /// Creates new error for cannot deploy because the desired number of nodes is greater than the maximum allowed. + /// + /// Arguments: + /// + /// * `event_details`: Error linked event details. + /// * `actual_nodes`: The actual number of nodes running. + /// * `max_nodes`: The maximum number of nodes allowed. + pub fn new_cannot_deploy_max_nodes_exceeded( + event_details: EventDetails, + actual_nodes: i32, + max_nodes: i32, + ) -> EngineError { + let message = format!( + "The actual number of nodes {} can't be greater than the maximum value {}", + actual_nodes, max_nodes + ); + + EngineError::new( + event_details, + Tag::NotEnoughNodesAvailableToDeployEnvironment, + message.to_string(), + message, + None, + None, + Some("Consider to upgrade your nodes configuration.".to_string()), + ) + } + /// Creates new error for cannot deploy because there are not enough available resources on the cluster. /// /// Arguments: diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index d86f802f..51a2f58f 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -17,7 +17,9 @@ use std::str::FromStr; use std::sync::Arc; use tracing::error; -use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::common::{ + get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES, +}; use crate::dns::{dns_provider_cloudflare, dns_provider_qoverydns}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; @@ -66,6 +68,8 @@ pub fn aws_default_engine_config(context: &Context, logger: Box) -> cluster_id: context.cluster_id().to_string(), }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ) } @@ -78,6 +82,8 @@ impl Cluster for AWS { kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, + min_nodes: i32, + max_nodes: i32, ) -> EngineConfig { // use ECR let container_registry = 
Box::new(container_registry_ecr(context, logger.clone())); @@ -101,6 +107,8 @@ impl Cluster for AWS { logger.clone(), localisation, vpc_network_mode, + min_nodes, + max_nodes, ); EngineConfig::new( @@ -148,9 +156,9 @@ impl Cluster for AWS { )) } - fn kubernetes_nodes() -> Vec { + fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec { vec![ - NodeGroups::new("groupeks0".to_string(), 5, 10, "t3a.large".to_string(), 100) + NodeGroups::new("groupeks0".to_string(), min_nodes, max_nodes, "t3a.large".to_string(), 100) .expect("Problem while setup EKS nodes"), ] } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index 1459c1e1..7a3a347c 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -53,6 +53,9 @@ use tracing::{span, Level}; use url::Url; use uuid::Uuid; +pub const KUBERNETES_MIN_NODES: i32 = 5; +pub const KUBERNETES_MAX_NODES: i32 = 10; + pub enum RegionActivationStatus { Deactivated, Activated, @@ -72,9 +75,11 @@ pub trait Cluster { kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, + min_nodes: i32, + max_nodes: i32, ) -> EngineConfig; fn cloud_provider(context: &Context) -> Box; - fn kubernetes_nodes() -> Vec; + fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec; fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option) -> U; } @@ -213,6 +218,7 @@ pub enum ClusterTestType { Classic, WithPause, WithUpgrade, + WithNodesResize, } pub fn environment_3_apps_3_routers_3_databases( @@ -1187,6 +1193,8 @@ pub fn test_db( cluster_id: context.cluster_id().to_string(), }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Do => DO::docker_cr_engine( &context, @@ -1198,6 +1206,8 @@ pub fn test_db( cluster_id: context.cluster_id().to_string(), }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Scw => Scaleway::docker_cr_engine( &context, @@ -1209,6 +1219,8 @@ pub fn test_db( cluster_id: context.cluster_id().to_string(), }, None, + 
KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), }; @@ -1275,6 +1287,8 @@ pub fn test_db( kubernetes_version, &ClusterDomain::Default { cluster_id }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Do => DO::docker_cr_engine( &context_for_delete, @@ -1284,6 +1298,8 @@ pub fn test_db( kubernetes_version, &ClusterDomain::Default { cluster_id }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Scw => Scaleway::docker_cr_engine( &context_for_delete, @@ -1293,6 +1309,8 @@ pub fn test_db( kubernetes_version, &ClusterDomain::Default { cluster_id }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), }; @@ -1311,6 +1329,8 @@ pub fn get_environment_test_kubernetes( logger: Box, localisation: &str, vpc_network_mode: Option, + min_nodes: i32, + max_nodes: i32, ) -> Box { let secrets = FuncTestsSecrets::new(); @@ -1334,7 +1354,7 @@ pub fn get_environment_test_kubernetes( cloud_provider, dns_provider, options, - AWS::kubernetes_nodes(), + AWS::kubernetes_nodes(min_nodes, max_nodes), logger, ) .unwrap(), @@ -1377,7 +1397,7 @@ pub fn get_environment_test_kubernetes( region, cloud_provider, dns_provider, - DO::kubernetes_nodes(), + DO::kubernetes_nodes(min_nodes, max_nodes), DO::kubernetes_cluster_options(secrets, Option::from(context.cluster_id().to_string())), logger, ) @@ -1396,7 +1416,7 @@ pub fn get_environment_test_kubernetes( zone, cloud_provider, dns_provider, - Scaleway::kubernetes_nodes(), + Scaleway::kubernetes_nodes(min_nodes, max_nodes), Scaleway::kubernetes_cluster_options(secrets, None), logger, ) @@ -1421,6 +1441,8 @@ pub fn get_cluster_test_kubernetes<'a>( dns_provider: Arc>, vpc_network_mode: Option, logger: Box, + min_nodes: i32, + max_nodes: i32, ) -> Box { let kubernetes: Box = match kubernetes_provider { KubernetesKind::Eks => { @@ -1443,7 +1465,7 @@ pub fn get_cluster_test_kubernetes<'a>( cloud_provider, dns_provider, options, - AWS::kubernetes_nodes(), + AWS::kubernetes_nodes(min_nodes, max_nodes), logger, ) .unwrap(), @@ 
-1485,7 +1507,7 @@ pub fn get_cluster_test_kubernetes<'a>( DoRegion::from_str(localisation).expect("Unknown region set for DOKS"), cloud_provider, dns_provider, - DO::kubernetes_nodes(), + DO::kubernetes_nodes(min_nodes, max_nodes), DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), logger, ) @@ -1501,7 +1523,7 @@ pub fn get_cluster_test_kubernetes<'a>( ScwZone::from_str(localisation).expect("Unknown zone set for Kapsule"), cloud_provider, dns_provider, - Scaleway::kubernetes_nodes(), + Scaleway::kubernetes_nodes(min_nodes, max_nodes), Scaleway::kubernetes_cluster_options(secrets, None), logger, ) @@ -1542,6 +1564,8 @@ pub fn cluster_test( boot_version, cluster_domain, vpc_network_mode.clone(), + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Do => DO::docker_cr_engine( &context, @@ -1551,6 +1575,8 @@ pub fn cluster_test( boot_version, cluster_domain, vpc_network_mode.clone(), + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Scw => Scaleway::docker_cr_engine( &context, @@ -1560,6 +1586,8 @@ pub fn cluster_test( boot_version, cluster_domain, vpc_network_mode.clone(), + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), }; let mut deploy_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); @@ -1611,6 +1639,7 @@ pub fn cluster_test( } match test_type { + // TODO new test type ClusterTestType::Classic => {} ClusterTestType::WithPause => { let mut pause_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); @@ -1651,6 +1680,8 @@ pub fn cluster_test( upgrade_to_version, cluster_domain, vpc_network_mode, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Do => DO::docker_cr_engine( &context, @@ -1660,6 +1691,8 @@ pub fn cluster_test( upgrade_to_version, cluster_domain, vpc_network_mode, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), Kind::Scw => Scaleway::docker_cr_engine( &context, @@ -1669,6 +1702,8 @@ pub fn cluster_test( upgrade_to_version, 
cluster_domain, vpc_network_mode, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ), }; let mut upgrade_tx = @@ -1700,6 +1735,67 @@ pub fn cluster_test( return test_name.to_string(); } + ClusterTestType::WithNodesResize => { + let min_nodes = 11; + let max_nodes = 15; + let kubernetes_version = format!("{}.{}", major_boot_version, minor_boot_version.clone()); + let engine = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation, + kubernetes_version, + cluster_domain, + vpc_network_mode.clone(), + min_nodes, + max_nodes, + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation, + kubernetes_version, + cluster_domain, + vpc_network_mode.clone(), + min_nodes, + max_nodes, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation, + kubernetes_version, + cluster_domain, + vpc_network_mode.clone(), + min_nodes, + max_nodes, + ), + }; + let mut upgrade_tx = + Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + let mut delete_tx = + Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); + // Upgrade + if let Err(err) = upgrade_tx.create_kubernetes() { + panic!("{:?}", err) + } + assert!(matches!(upgrade_tx.commit(), TransactionResult::Ok)); + if let Err(err) = metrics_server_test( + engine + .kubernetes() + .get_kubeconfig_file_path() + .expect("Unable to get config file path"), + engine.kubernetes().cloud_provider().credentials_environment_variables(), + ) { + panic!("{:?}", err) + } + // Delete + if let Err(err) = delete_tx.delete_kubernetes() { + panic!("{:?}", err) + } + assert!(matches!(delete_tx.commit(), TransactionResult::Ok)); + return test_name.to_string(); + } } // Destroy env if any diff --git a/test_utilities/src/digitalocean.rs b/test_utilities/src/digitalocean.rs index 23f1289d..a3c6ae65 100644 --- a/test_utilities/src/digitalocean.rs +++ 
b/test_utilities/src/digitalocean.rs @@ -11,7 +11,9 @@ use qovery_engine::engine::EngineConfig; use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use std::sync::Arc; -use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::common::{ + get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES, +}; use crate::dns::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; use qovery_engine::cloud_provider::qovery::EngineLocation; @@ -53,6 +55,8 @@ pub fn do_default_engine_config(context: &Context, logger: Box) -> E cluster_id: context.cluster_id().to_string(), }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ) } @@ -65,6 +69,8 @@ impl Cluster for DO { kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, + min_nodes: i32, + max_nodes: i32, ) -> EngineConfig { // use DigitalOcean Container Registry let container_registry = Box::new(container_registry_digital_ocean(context)); @@ -84,6 +90,8 @@ impl Cluster for DO { logger.clone(), localisation, vpc_network_mode, + min_nodes, + max_nodes, ); EngineConfig::new( @@ -134,9 +142,9 @@ impl Cluster for DO { )) } - fn kubernetes_nodes() -> Vec { + fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec { vec![ - NodeGroups::new("groupdoks0".to_string(), 5, 10, "s-4vcpu-8gb".to_string(), 0) + NodeGroups::new("groupdoks0".to_string(), min_nodes, max_nodes, "s-4vcpu-8gb".to_string(), 0) .expect("Problem while setup DOKS nodes"), ] } diff --git a/test_utilities/src/scaleway.rs b/test_utilities/src/scaleway.rs index dfdf851a..4a6d8976 100644 --- a/test_utilities/src/scaleway.rs +++ b/test_utilities/src/scaleway.rs @@ -21,7 +21,9 @@ use qovery_engine::logger::Logger; use qovery_engine::models::scaleway::ScwZone; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; -use 
crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; +use crate::common::{ + get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES, +}; use crate::dns::dns_provider_cloudflare; use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; @@ -75,6 +77,8 @@ pub fn scw_default_engine_config(context: &Context, logger: Box) -> cluster_id: context.cluster_id().to_string(), }, None, + KUBERNETES_MIN_NODES, + KUBERNETES_MAX_NODES, ) } @@ -87,6 +91,8 @@ impl Cluster for Scaleway { kubernetes_version: String, cluster_domain: &ClusterDomain, vpc_network_mode: Option, + min_nodes: i32, + max_nodes: i32, ) -> EngineConfig { // use Scaleway CR let container_registry = Box::new(container_registry_scw(context)); @@ -107,6 +113,8 @@ impl Cluster for Scaleway { logger.clone(), localisation, vpc_network_mode, + min_nodes, + max_nodes, ); EngineConfig::new( @@ -158,10 +166,12 @@ impl Cluster for Scaleway { )) } - fn kubernetes_nodes() -> Vec { + fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec { // Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L - vec![NodeGroups::new("groupscw0".to_string(), 5, 10, "dev1-l".to_string(), 0) - .expect("Problem while setup SCW nodes")] + vec![ + NodeGroups::new("groupscw0".to_string(), min_nodes, max_nodes, "dev1-l".to_string(), 0) + .expect("Problem while setup SCW nodes"), + ] } fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option) -> KapsuleOptions { diff --git a/tests/aws/aws_whole_enchilada.rs b/tests/aws/aws_whole_enchilada.rs index 754072bc..59980374 100644 --- a/tests/aws/aws_whole_enchilada.rs +++ b/tests/aws/aws_whole_enchilada.rs @@ -53,3 +53,45 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() { ) }) } + +#[cfg(feature = "test-aws-whole-enchilada")] +#[named] +#[test] +fn create_resize_and_destroy_eks_cluster_with_env_in_eu_west_3() { + let secrets = 
FuncTestsSecrets::new(); + + let region = secrets.AWS_DEFAULT_REGION.as_ref().expect("AWS region was not found"); + let aws_region = AwsRegion::from_str(region).expect("Wasn't able to convert the desired region"); + let aws_zones = aws_region.get_zones(); + + let organization_id = generate_id(); + let cluster_id = generate_cluster_id(aws_region.to_string().as_str()); + let context = context(organization_id.as_str(), cluster_id.as_str()); + + let cluster_domain = format!( + "{}.{}", + cluster_id.as_str(), + secrets + .DEFAULT_TEST_DOMAIN + .as_ref() + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + .as_str() + ); + + engine_run_test(|| { + cluster_test( + function_name!(), + Kind::Aws, + context.clone(), + logger(), + region, + Some(aws_zones), + ClusterTestType::WithNodesResize, + AWS_KUBERNETES_MAJOR_VERSION, + AWS_KUBERNETES_MINOR_VERSION, + &ClusterDomain::Custom(cluster_domain), + None, + None, + ) + }) +}