This commit is contained in:
Bilel Benamira
2022-05-16 16:09:20 +02:00
parent 7937370f51
commit 2341163fb2
10 changed files with 378 additions and 20 deletions

View File

@@ -18,14 +18,16 @@ resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" {
) )
scaling_config { scaling_config {
desired_size = "{{ eks_worker_node.min_nodes }}" desired_size = "{{ eks_worker_desired_nodes }}"
max_size = "{{ eks_worker_node.max_nodes }}" max_size = "{{ eks_worker_node.max_nodes }}"
min_size = "{{ eks_worker_node.min_nodes }}" min_size = "{{ eks_worker_node.min_nodes }}"
} }
lifecycle { lifecycle {
// don't update the desired size and let the cluster-autoscaler do the job // don't update the desired size and let the cluster-autoscaler do the job
{% if not eks_worker_update_desired_nodes %}
ignore_changes = [scaling_config[0].desired_size] ignore_changes = [scaling_config[0].desired_size]
{% endif %}
create_before_destroy = true create_before_destroy = true
} }

View File

@@ -1,6 +1,7 @@
use core::fmt; use core::fmt;
use std::env; use std::env;
use std::path::Path; use std::path::Path;
use std::str::FromStr;
use retry::delay::{Fibonacci, Fixed}; use retry::delay::{Fibonacci, Fixed};
use retry::Error::Operation; use retry::Error::Operation;
@@ -24,7 +25,9 @@ use crate::cloud_provider::utilities::{wait_until_port_is_open, TcpCheckSource};
use crate::cloud_provider::CloudProvider; use crate::cloud_provider::CloudProvider;
use crate::cmd; use crate::cmd;
use crate::cmd::helm::{to_engine_error, Helm}; use crate::cmd::helm::{to_engine_error, Helm};
use crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events}; use crate::cmd::kubectl::{
kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events, kubectl_exec_get_node,
};
use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list}; use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list};
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces}; use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider::DnsProvider; use crate::dns_provider::DnsProvider;
@@ -32,7 +35,11 @@ use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity};
use crate::events::{EngineEvent, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter}; use crate::events::{EngineEvent, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter};
use crate::io_models::{Context, Features, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString}; use crate::io_models::{Context, Features, ListenersHelper, QoveryIdentifier, ToHelmString, ToTerraformString};
use crate::object_storage::s3::S3; use crate::object_storage::s3::S3;
use crate::runtime::block_on;
use crate::string::terraform_list_format; use crate::string::terraform_list_format;
use rusoto_core::credential::StaticProvider;
use rusoto_core::{Client, HttpClient, Region as RusotoRegion};
use rusoto_eks::{DescribeNodegroupRequest, Eks, EksClient, ListNodegroupsRequest, NodegroupScalingConfig};
pub mod ec2; pub mod ec2;
mod ec2_helm_charts; mod ec2_helm_charts;
@@ -380,7 +387,10 @@ fn tera_context(
match env::var_os("VAULT_SECRET_ID") { match env::var_os("VAULT_SECRET_ID") {
Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()), Some(secret_id) => context.insert("vault_secret_id", secret_id.to_str().unwrap()),
None => kubernetes.logger().log(EngineEvent::Error( None => kubernetes.logger().log(EngineEvent::Error(
EngineError::new_missing_required_env_variable(event_details, "VAULT_SECRET_ID".to_string()), EngineError::new_missing_required_env_variable(
event_details.clone(),
"VAULT_SECRET_ID".to_string(),
),
None, None,
)), )),
} }
@@ -428,6 +438,12 @@ fn tera_context(
.as_str(), .as_str(),
); );
let (update_desired_node, desired_nodes) = match should_update_desired_nodes(event_details, kubernetes, node_groups)
{
Err(e) => return Err(e),
Ok(value) => value,
};
context.insert("aws_region", &kubernetes.region()); context.insert("aws_region", &kubernetes.region());
context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates"); context.insert("aws_terraform_backend_bucket", "qovery-terrafom-tfstates");
context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates"); context.insert("aws_terraform_backend_dynamodb_table", "qovery-terrafom-tfstates");
@@ -444,6 +460,8 @@ fn tera_context(
context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id()); context.insert("kubernetes_full_cluster_id", kubernetes.context().cluster_id());
context.insert("eks_region_cluster_id", region_cluster_id.as_str()); context.insert("eks_region_cluster_id", region_cluster_id.as_str());
context.insert("eks_worker_nodes", &node_groups); context.insert("eks_worker_nodes", &node_groups);
context.insert("eks_worker_desired_nodes", &desired_nodes);
context.insert("eks_worker_update_desired_nodes", &update_desired_node);
context.insert("ec2_zone_a_subnet_blocks_private", &ec2_zone_a_subnet_blocks_private); context.insert("ec2_zone_a_subnet_blocks_private", &ec2_zone_a_subnet_blocks_private);
context.insert("ec2_zone_b_subnet_blocks_private", &ec2_zone_b_subnet_blocks_private); context.insert("ec2_zone_b_subnet_blocks_private", &ec2_zone_b_subnet_blocks_private);
context.insert("ec2_zone_c_subnet_blocks_private", &ec2_zone_c_subnet_blocks_private); context.insert("ec2_zone_c_subnet_blocks_private", &ec2_zone_c_subnet_blocks_private);
@@ -497,6 +515,132 @@ fn tera_context(
Ok(context) Ok(context)
} }
/// Returns a tuple of (update_desired_node: bool, desired_nodes_count: i32).
///
/// Decides whether Terraform should (re)apply the node group desired size —
/// i.e. when the configured min/max bounds differ from the ones currently
/// deployed on EKS — and computes the desired node count to apply.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `kubernetes`: Cluster the node groups belong to.
/// * `node_groups`: Requested node groups configuration (first entry is used).
fn should_update_desired_nodes(
    event_details: EventDetails,
    kubernetes: &dyn Kubernetes,
    node_groups: &[NodeGroups],
) -> Result<(bool, i32), EngineError> {
    let cloud_provider = kubernetes.cloud_provider();

    // A cluster without any node group cannot be deployed: fail early.
    let future_node_group = match node_groups.first() {
        Some(node_group) => node_group,
        None => {
            return Err(EngineError::new_cluster_has_no_worker_nodes(
                event_details,
                Some(CommandError::new_from_safe_message(
                    "Could not find node_group in terra context".to_string(),
                )),
            ));
        }
    };

    // No scaling config available (e.g. cluster not created yet): don't touch
    // the desired size and default it to the configured minimum.
    let scaling_config = match get_node_scaling_config(event_details.clone(), kubernetes)? {
        Some(config) => config,
        None => return Ok((false, future_node_group.min_nodes)),
    };

    // Desired size must be (re)applied whenever the min/max bounds changed.
    let should_update_desired_nodes = scaling_config.min_size.unwrap_or_default()
        != i64::from(future_node_group.min_nodes)
        || scaling_config.max_size.unwrap_or_default() != i64::from(future_node_group.max_nodes);

    // Without a kubeconfig we cannot count the running nodes: keep the
    // currently deployed desired size. Fall back to min_nodes when AWS
    // reports no desired size, instead of panicking on `unwrap()`.
    let kubeconfig = match kubernetes.get_kubeconfig_file() {
        Ok((path, _)) => path,
        Err(_) => {
            let current_desired = scaling_config
                .desired_size
                .unwrap_or_else(|| i64::from(future_node_group.min_nodes));
            return Ok((false, current_desired as i32));
        }
    };

    // Nodes may take a while to register; retry a few times before giving up.
    let get_node_result = retry::retry(Fixed::from_millis(10000).take(5), || {
        match kubectl_exec_get_node(kubeconfig.clone(), cloud_provider.credentials_environment_variables().clone()) {
            Err(e) => OperationResult::Retry(e),
            Ok(nodes) => OperationResult::Ok(nodes.items.len() as i32),
        }
    });

    let actual_nodes_count = match get_node_result {
        Ok(value) => value,
        Err(Operation { error, .. }) => {
            return Err(EngineError::new_cluster_has_no_worker_nodes(event_details, Some(error)));
        }
        Err(retry::Error::Internal(e)) => {
            return Err(EngineError::new_cluster_has_no_worker_nodes(
                event_details,
                Some(CommandError::new_from_safe_message(e)),
            ));
        }
    };

    future_node_group
        .get_desired_nodes(event_details, actual_nodes_count)
        .map(|desired_nodes| (should_update_desired_nodes, desired_nodes))
}
/// Returns a rusoto EKS client built from the cluster's cloud provider credentials.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `kubernetes`: Cluster whose region and credentials are used.
fn get_rusoto_eks_client(event_details: EventDetails, kubernetes: &dyn Kubernetes) -> Result<EksClient, EngineError> {
    let cloud_provider = kubernetes.cloud_provider();

    let region = match RusotoRegion::from_str(&kubernetes.region()) {
        Ok(value) => value,
        Err(error) => {
            // Report the offending region instead of an empty string so the
            // error is actionable.
            return Err(EngineError::new_unsupported_region(
                event_details,
                kubernetes.region(),
                CommandError::new_from_safe_message(error.to_string()),
            ));
        }
    };

    let credentials =
        StaticProvider::new(cloud_provider.access_key_id(), cloud_provider.secret_access_key(), None, None);

    let client = Client::new_with(credentials, HttpClient::new().expect("unable to create new Http client"));

    Ok(EksClient::new_with_client(client, region))
}
/// Returns the scaling config of the cluster's first node group, or `None`
/// when the node group list cannot be fetched (best effort: an API error at
/// that stage usually means the cluster does not exist yet).
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `kubernetes`: Cluster whose node groups are inspected.
fn get_node_scaling_config(
    event_details: EventDetails,
    kubernetes: &dyn Kubernetes,
) -> Result<Option<NodegroupScalingConfig>, EngineError> {
    let eks_client = get_rusoto_eks_client(event_details.clone(), kubernetes)?;

    // Listing node groups is best effort: on error there is simply no
    // existing scaling config to compare against.
    let node_groups = match block_on(eks_client.list_nodegroups(ListNodegroupsRequest {
        cluster_name: kubernetes.cluster_name(),
        ..Default::default()
    })) {
        Ok(res) => res.nodegroups.unwrap_or_default(),
        Err(_) => return Ok(None),
    };

    // An existing cluster with an empty node group list is an error, however.
    let first_node_group_name = match node_groups.first() {
        Some(name) => name,
        None => {
            return Err(EngineError::new_cluster_has_no_worker_nodes(
                event_details,
                Some(CommandError::new_from_safe_message(format!(
                    "Could not find node_groups for cluster {}",
                    kubernetes.cluster_name(),
                ))),
            ));
        }
    };

    let actual_nodes_group = match block_on(eks_client.describe_nodegroup(DescribeNodegroupRequest {
        cluster_name: kubernetes.cluster_name(),
        nodegroup_name: first_node_group_name.to_string(),
    })) {
        Ok(res) => res.nodegroup.unwrap_or_default(),
        Err(error) => {
            return Err(EngineError::new_cluster_has_no_worker_nodes(
                event_details,
                Some(CommandError::new_from_safe_message(error.to_string())),
            ));
        }
    };

    Ok(actual_nodes_group.scaling_config)
}
fn create( fn create(
kubernetes: &dyn Kubernetes, kubernetes: &dyn Kubernetes,
kubernetes_long_id: uuid::Uuid, kubernetes_long_id: uuid::Uuid,

View File

@@ -1,3 +1,5 @@
use crate::errors::EngineError;
use crate::events::EventDetails;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Clone, Eq, PartialEq, Hash)] #[derive(Clone, Eq, PartialEq, Hash)]
@@ -72,6 +74,20 @@ pub struct NodeGroups {
pub disk_size_in_gib: i32, pub disk_size_in_gib: i32,
} }
impl NodeGroups {
    /// Computes the desired node count to apply on this node group.
    ///
    /// Returns the actual running node count, bounded below by `min_nodes`,
    /// so that applying the configuration does not undo scaling performed by
    /// the cluster-autoscaler. Fails when the cluster is already running more
    /// nodes than `max_nodes` allows, since deploying would shrink it.
    ///
    /// Arguments:
    ///
    /// * `event_details`: Error linked event details.
    /// * `actual_nodes_count`: Number of nodes currently running in the cluster.
    pub fn get_desired_nodes(&self, event_details: EventDetails, actual_nodes_count: i32) -> Result<i32, EngineError> {
        if actual_nodes_count > self.max_nodes {
            Err(EngineError::new_cannot_deploy_max_nodes_exceeded(
                event_details,
                actual_nodes_count,
                self.max_nodes,
            ))
        } else {
            // Bug fix: returning `self.max_nodes` here would always scale the
            // cluster up to its maximum. The desired count must track the
            // actual node count (clamped to the configured minimum) to
            // preserve the autoscaler's current state.
            Ok(actual_nodes_count.max(self.min_nodes))
        }
    }
}
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct NodeGroupsFormat { pub struct NodeGroupsFormat {
pub name: String, pub name: String,

View File

@@ -30,6 +30,7 @@ pub enum Tag {
CannotRetrieveClusterConfigFile, CannotRetrieveClusterConfigFile,
CannotCreateFile, CannotCreateFile,
CannotGetClusterNodes, CannotGetClusterNodes,
NotEnoughNodesAvailableToDeployEnvironment,
NotEnoughResourcesToDeployEnvironment, NotEnoughResourcesToDeployEnvironment,
CannotUninstallHelmChart, CannotUninstallHelmChart,
CannotExecuteK8sVersion, CannotExecuteK8sVersion,
@@ -128,6 +129,7 @@ impl From<errors::Tag> for Tag {
errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile, errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile,
errors::Tag::CannotCreateFile => Tag::CannotCreateFile, errors::Tag::CannotCreateFile => Tag::CannotCreateFile,
errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes, errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes,
errors::Tag::NotEnoughNodesAvailableToDeployEnvironment => Tag::NotEnoughNodesAvailableToDeployEnvironment,
errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment, errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment,
errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable, errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable,
errors::Tag::ClusterHasNoWorkerNodes => Tag::ClusterHasNoWorkerNodes, errors::Tag::ClusterHasNoWorkerNodes => Tag::ClusterHasNoWorkerNodes,

View File

@@ -189,6 +189,8 @@ pub enum Tag {
CannotCreateFile, CannotCreateFile,
/// CannotGetClusterNodes: represents an error while trying to get cluster's nodes. /// CannotGetClusterNodes: represents an error while trying to get cluster's nodes.
CannotGetClusterNodes, CannotGetClusterNodes,
/// NotEnoughNodesAvailableToDeployEnvironment: represents an error when trying to deploy an environment but the desired number of nodes exceeds the maximum allowed value.
NotEnoughNodesAvailableToDeployEnvironment,
/// NotEnoughResourcesToDeployEnvironment: represents an error when trying to deploy an environment but there are not enough resources available on the cluster. /// NotEnoughResourcesToDeployEnvironment: represents an error when trying to deploy an environment but there are not enough resources available on the cluster.
NotEnoughResourcesToDeployEnvironment, NotEnoughResourcesToDeployEnvironment,
/// CannotUninstallHelmChart: represents an error when trying to uninstall an helm chart on the cluster, uninstallation couldn't be proceeded. /// CannotUninstallHelmChart: represents an error when trying to uninstall an helm chart on the cluster, uninstallation couldn't be proceeded.
@@ -814,6 +816,34 @@ impl EngineError {
) )
} }
/// Creates new error for cannot deploy because the desired number of nodes is greater than the maximum allowed.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `actual_nodes`: The actual number of nodes running.
/// * `max_nodes`: The maximum number of nodes allowed.
pub fn new_cannot_deploy_max_nodes_exceeded(
    event_details: EventDetails,
    actual_nodes: i32,
    max_nodes: i32,
) -> EngineError {
    let message = format!(
        "The actual number of nodes {} can't be greater than the maximum value {}",
        actual_nodes, max_nodes
    );

    EngineError::new(
        event_details,
        Tag::NotEnoughNodesAvailableToDeployEnvironment,
        // Same text for user-facing and internal message; clone instead of
        // the redundant `to_string()` on an already-owned String.
        message.clone(),
        message,
        None,
        None,
        Some("Consider upgrading your nodes configuration.".to_string()),
    )
}
/// Creates new error for cannot deploy because there are not enough available resources on the cluster. /// Creates new error for cannot deploy because there are not enough available resources on the cluster.
/// ///
/// Arguments: /// Arguments:

View File

@@ -17,7 +17,9 @@ use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use tracing::error; use tracing::error;
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::common::{
get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES,
};
use crate::dns::{dns_provider_cloudflare, dns_provider_qoverydns}; use crate::dns::{dns_provider_cloudflare, dns_provider_qoverydns};
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
@@ -66,6 +68,8 @@ pub fn aws_default_engine_config(context: &Context, logger: Box<dyn Logger>) ->
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
) )
} }
@@ -78,6 +82,8 @@ impl Cluster<AWS, Options> for AWS {
kubernetes_version: String, kubernetes_version: String,
cluster_domain: &ClusterDomain, cluster_domain: &ClusterDomain,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
min_nodes: i32,
max_nodes: i32,
) -> EngineConfig { ) -> EngineConfig {
// use ECR // use ECR
let container_registry = Box::new(container_registry_ecr(context, logger.clone())); let container_registry = Box::new(container_registry_ecr(context, logger.clone()));
@@ -101,6 +107,8 @@ impl Cluster<AWS, Options> for AWS {
logger.clone(), logger.clone(),
localisation, localisation,
vpc_network_mode, vpc_network_mode,
min_nodes,
max_nodes,
); );
EngineConfig::new( EngineConfig::new(
@@ -148,9 +156,9 @@ impl Cluster<AWS, Options> for AWS {
)) ))
} }
fn kubernetes_nodes() -> Vec<NodeGroups> { fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec<NodeGroups> {
vec![ vec![
NodeGroups::new("groupeks0".to_string(), 5, 10, "t3a.large".to_string(), 100) NodeGroups::new("groupeks0".to_string(), min_nodes, max_nodes, "t3a.large".to_string(), 100)
.expect("Problem while setup EKS nodes"), .expect("Problem while setup EKS nodes"),
] ]
} }

View File

@@ -53,6 +53,9 @@ use tracing::{span, Level};
use url::Url; use url::Url;
use uuid::Uuid; use uuid::Uuid;
pub const KUBERNETES_MIN_NODES: i32 = 5;
pub const KUBERNETES_MAX_NODES: i32 = 10;
pub enum RegionActivationStatus { pub enum RegionActivationStatus {
Deactivated, Deactivated,
Activated, Activated,
@@ -72,9 +75,11 @@ pub trait Cluster<T, U> {
kubernetes_version: String, kubernetes_version: String,
cluster_domain: &ClusterDomain, cluster_domain: &ClusterDomain,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
min_nodes: i32,
max_nodes: i32,
) -> EngineConfig; ) -> EngineConfig;
fn cloud_provider(context: &Context) -> Box<T>; fn cloud_provider(context: &Context) -> Box<T>;
fn kubernetes_nodes() -> Vec<NodeGroups>; fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec<NodeGroups>;
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option<String>) -> U; fn kubernetes_cluster_options(secrets: FuncTestsSecrets, cluster_id: Option<String>) -> U;
} }
@@ -213,6 +218,7 @@ pub enum ClusterTestType {
Classic, Classic,
WithPause, WithPause,
WithUpgrade, WithUpgrade,
WithNodesResize,
} }
pub fn environment_3_apps_3_routers_3_databases( pub fn environment_3_apps_3_routers_3_databases(
@@ -1187,6 +1193,8 @@ pub fn test_db(
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Do => DO::docker_cr_engine( Kind::Do => DO::docker_cr_engine(
&context, &context,
@@ -1198,6 +1206,8 @@ pub fn test_db(
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Scw => Scaleway::docker_cr_engine( Kind::Scw => Scaleway::docker_cr_engine(
&context, &context,
@@ -1209,6 +1219,8 @@ pub fn test_db(
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
}; };
@@ -1275,6 +1287,8 @@ pub fn test_db(
kubernetes_version, kubernetes_version,
&ClusterDomain::Default { cluster_id }, &ClusterDomain::Default { cluster_id },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Do => DO::docker_cr_engine( Kind::Do => DO::docker_cr_engine(
&context_for_delete, &context_for_delete,
@@ -1284,6 +1298,8 @@ pub fn test_db(
kubernetes_version, kubernetes_version,
&ClusterDomain::Default { cluster_id }, &ClusterDomain::Default { cluster_id },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Scw => Scaleway::docker_cr_engine( Kind::Scw => Scaleway::docker_cr_engine(
&context_for_delete, &context_for_delete,
@@ -1293,6 +1309,8 @@ pub fn test_db(
kubernetes_version, kubernetes_version,
&ClusterDomain::Default { cluster_id }, &ClusterDomain::Default { cluster_id },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
}; };
@@ -1311,6 +1329,8 @@ pub fn get_environment_test_kubernetes(
logger: Box<dyn Logger>, logger: Box<dyn Logger>,
localisation: &str, localisation: &str,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
min_nodes: i32,
max_nodes: i32,
) -> Box<dyn Kubernetes> { ) -> Box<dyn Kubernetes> {
let secrets = FuncTestsSecrets::new(); let secrets = FuncTestsSecrets::new();
@@ -1334,7 +1354,7 @@ pub fn get_environment_test_kubernetes(
cloud_provider, cloud_provider,
dns_provider, dns_provider,
options, options,
AWS::kubernetes_nodes(), AWS::kubernetes_nodes(min_nodes, max_nodes),
logger, logger,
) )
.unwrap(), .unwrap(),
@@ -1377,7 +1397,7 @@ pub fn get_environment_test_kubernetes(
region, region,
cloud_provider, cloud_provider,
dns_provider, dns_provider,
DO::kubernetes_nodes(), DO::kubernetes_nodes(min_nodes, max_nodes),
DO::kubernetes_cluster_options(secrets, Option::from(context.cluster_id().to_string())), DO::kubernetes_cluster_options(secrets, Option::from(context.cluster_id().to_string())),
logger, logger,
) )
@@ -1396,7 +1416,7 @@ pub fn get_environment_test_kubernetes(
zone, zone,
cloud_provider, cloud_provider,
dns_provider, dns_provider,
Scaleway::kubernetes_nodes(), Scaleway::kubernetes_nodes(min_nodes, max_nodes),
Scaleway::kubernetes_cluster_options(secrets, None), Scaleway::kubernetes_cluster_options(secrets, None),
logger, logger,
) )
@@ -1421,6 +1441,8 @@ pub fn get_cluster_test_kubernetes<'a>(
dns_provider: Arc<Box<dyn DnsProvider>>, dns_provider: Arc<Box<dyn DnsProvider>>,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
logger: Box<dyn Logger>, logger: Box<dyn Logger>,
min_nodes: i32,
max_nodes: i32,
) -> Box<dyn Kubernetes + 'a> { ) -> Box<dyn Kubernetes + 'a> {
let kubernetes: Box<dyn Kubernetes> = match kubernetes_provider { let kubernetes: Box<dyn Kubernetes> = match kubernetes_provider {
KubernetesKind::Eks => { KubernetesKind::Eks => {
@@ -1443,7 +1465,7 @@ pub fn get_cluster_test_kubernetes<'a>(
cloud_provider, cloud_provider,
dns_provider, dns_provider,
options, options,
AWS::kubernetes_nodes(), AWS::kubernetes_nodes(min_nodes, max_nodes),
logger, logger,
) )
.unwrap(), .unwrap(),
@@ -1485,7 +1507,7 @@ pub fn get_cluster_test_kubernetes<'a>(
DoRegion::from_str(localisation).expect("Unknown region set for DOKS"), DoRegion::from_str(localisation).expect("Unknown region set for DOKS"),
cloud_provider, cloud_provider,
dns_provider, dns_provider,
DO::kubernetes_nodes(), DO::kubernetes_nodes(min_nodes, max_nodes),
DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)), DO::kubernetes_cluster_options(secrets, Option::from(cluster_name)),
logger, logger,
) )
@@ -1501,7 +1523,7 @@ pub fn get_cluster_test_kubernetes<'a>(
ScwZone::from_str(localisation).expect("Unknown zone set for Kapsule"), ScwZone::from_str(localisation).expect("Unknown zone set for Kapsule"),
cloud_provider, cloud_provider,
dns_provider, dns_provider,
Scaleway::kubernetes_nodes(), Scaleway::kubernetes_nodes(min_nodes, max_nodes),
Scaleway::kubernetes_cluster_options(secrets, None), Scaleway::kubernetes_cluster_options(secrets, None),
logger, logger,
) )
@@ -1542,6 +1564,8 @@ pub fn cluster_test(
boot_version, boot_version,
cluster_domain, cluster_domain,
vpc_network_mode.clone(), vpc_network_mode.clone(),
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Do => DO::docker_cr_engine( Kind::Do => DO::docker_cr_engine(
&context, &context,
@@ -1551,6 +1575,8 @@ pub fn cluster_test(
boot_version, boot_version,
cluster_domain, cluster_domain,
vpc_network_mode.clone(), vpc_network_mode.clone(),
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Scw => Scaleway::docker_cr_engine( Kind::Scw => Scaleway::docker_cr_engine(
&context, &context,
@@ -1560,6 +1586,8 @@ pub fn cluster_test(
boot_version, boot_version,
cluster_domain, cluster_domain,
vpc_network_mode.clone(), vpc_network_mode.clone(),
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
}; };
let mut deploy_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut deploy_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap();
@@ -1611,6 +1639,7 @@ pub fn cluster_test(
} }
match test_type { match test_type {
// TODO new test type
ClusterTestType::Classic => {} ClusterTestType::Classic => {}
ClusterTestType::WithPause => { ClusterTestType::WithPause => {
let mut pause_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap(); let mut pause_tx = Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap();
@@ -1651,6 +1680,8 @@ pub fn cluster_test(
upgrade_to_version, upgrade_to_version,
cluster_domain, cluster_domain,
vpc_network_mode, vpc_network_mode,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Do => DO::docker_cr_engine( Kind::Do => DO::docker_cr_engine(
&context, &context,
@@ -1660,6 +1691,8 @@ pub fn cluster_test(
upgrade_to_version, upgrade_to_version,
cluster_domain, cluster_domain,
vpc_network_mode, vpc_network_mode,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
Kind::Scw => Scaleway::docker_cr_engine( Kind::Scw => Scaleway::docker_cr_engine(
&context, &context,
@@ -1669,6 +1702,8 @@ pub fn cluster_test(
upgrade_to_version, upgrade_to_version,
cluster_domain, cluster_domain,
vpc_network_mode, vpc_network_mode,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
), ),
}; };
let mut upgrade_tx = let mut upgrade_tx =
@@ -1700,6 +1735,67 @@ pub fn cluster_test(
return test_name.to_string(); return test_name.to_string();
} }
ClusterTestType::WithNodesResize => {
let min_nodes = 11;
let max_nodes = 15;
let kubernetes_version = format!("{}.{}", major_boot_version, minor_boot_version.clone());
let engine = match provider_kind {
Kind::Aws => AWS::docker_cr_engine(
&context,
logger.clone(),
localisation,
kubernetes_version,
cluster_domain,
vpc_network_mode.clone(),
min_nodes,
max_nodes,
),
Kind::Do => DO::docker_cr_engine(
&context,
logger.clone(),
localisation,
kubernetes_version,
cluster_domain,
vpc_network_mode.clone(),
min_nodes,
max_nodes,
),
Kind::Scw => Scaleway::docker_cr_engine(
&context,
logger.clone(),
localisation,
kubernetes_version,
cluster_domain,
vpc_network_mode.clone(),
min_nodes,
max_nodes,
),
};
let mut upgrade_tx =
Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap();
let mut delete_tx =
Transaction::new(&engine, logger.clone(), Box::new(|| false), Box::new(|_| {})).unwrap();
// Upgrade
if let Err(err) = upgrade_tx.create_kubernetes() {
panic!("{:?}", err)
}
assert!(matches!(upgrade_tx.commit(), TransactionResult::Ok));
if let Err(err) = metrics_server_test(
engine
.kubernetes()
.get_kubeconfig_file_path()
.expect("Unable to get config file path"),
engine.kubernetes().cloud_provider().credentials_environment_variables(),
) {
panic!("{:?}", err)
}
// Delete
if let Err(err) = delete_tx.delete_kubernetes() {
panic!("{:?}", err)
}
assert!(matches!(delete_tx.commit(), TransactionResult::Ok));
return test_name.to_string();
}
} }
// Destroy env if any // Destroy env if any

View File

@@ -11,7 +11,9 @@ use qovery_engine::engine::EngineConfig;
use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener}; use qovery_engine::io_models::{Context, EnvironmentRequest, NoOpProgressListener};
use std::sync::Arc; use std::sync::Arc;
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::common::{
get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES,
};
use crate::dns::dns_provider_cloudflare; use crate::dns::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
use qovery_engine::cloud_provider::qovery::EngineLocation; use qovery_engine::cloud_provider::qovery::EngineLocation;
@@ -53,6 +55,8 @@ pub fn do_default_engine_config(context: &Context, logger: Box<dyn Logger>) -> E
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
) )
} }
@@ -65,6 +69,8 @@ impl Cluster<DO, DoksOptions> for DO {
kubernetes_version: String, kubernetes_version: String,
cluster_domain: &ClusterDomain, cluster_domain: &ClusterDomain,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
min_nodes: i32,
max_nodes: i32,
) -> EngineConfig { ) -> EngineConfig {
// use DigitalOcean Container Registry // use DigitalOcean Container Registry
let container_registry = Box::new(container_registry_digital_ocean(context)); let container_registry = Box::new(container_registry_digital_ocean(context));
@@ -84,6 +90,8 @@ impl Cluster<DO, DoksOptions> for DO {
logger.clone(), logger.clone(),
localisation, localisation,
vpc_network_mode, vpc_network_mode,
min_nodes,
max_nodes,
); );
EngineConfig::new( EngineConfig::new(
@@ -134,9 +142,9 @@ impl Cluster<DO, DoksOptions> for DO {
)) ))
} }
fn kubernetes_nodes() -> Vec<NodeGroups> { fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec<NodeGroups> {
vec![ vec![
NodeGroups::new("groupdoks0".to_string(), 5, 10, "s-4vcpu-8gb".to_string(), 0) NodeGroups::new("groupdoks0".to_string(), min_nodes, max_nodes, "s-4vcpu-8gb".to_string(), 0)
.expect("Problem while setup DOKS nodes"), .expect("Problem while setup DOKS nodes"),
] ]
} }

View File

@@ -21,7 +21,9 @@ use qovery_engine::logger::Logger;
use qovery_engine::models::scaleway::ScwZone; use qovery_engine::models::scaleway::ScwZone;
use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS}; use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS};
use crate::common::{get_environment_test_kubernetes, Cluster, ClusterDomain}; use crate::common::{
get_environment_test_kubernetes, Cluster, ClusterDomain, KUBERNETES_MAX_NODES, KUBERNETES_MIN_NODES,
};
use crate::dns::dns_provider_cloudflare; use crate::dns::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets}; use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets};
@@ -75,6 +77,8 @@ pub fn scw_default_engine_config(context: &Context, logger: Box<dyn Logger>) ->
cluster_id: context.cluster_id().to_string(), cluster_id: context.cluster_id().to_string(),
}, },
None, None,
KUBERNETES_MIN_NODES,
KUBERNETES_MAX_NODES,
) )
} }
@@ -87,6 +91,8 @@ impl Cluster<Scaleway, KapsuleOptions> for Scaleway {
kubernetes_version: String, kubernetes_version: String,
cluster_domain: &ClusterDomain, cluster_domain: &ClusterDomain,
vpc_network_mode: Option<VpcQoveryNetworkMode>, vpc_network_mode: Option<VpcQoveryNetworkMode>,
min_nodes: i32,
max_nodes: i32,
) -> EngineConfig { ) -> EngineConfig {
// use Scaleway CR // use Scaleway CR
let container_registry = Box::new(container_registry_scw(context)); let container_registry = Box::new(container_registry_scw(context));
@@ -107,6 +113,8 @@ impl Cluster<Scaleway, KapsuleOptions> for Scaleway {
logger.clone(), logger.clone(),
localisation, localisation,
vpc_network_mode, vpc_network_mode,
min_nodes,
max_nodes,
); );
EngineConfig::new( EngineConfig::new(
@@ -158,10 +166,12 @@ impl Cluster<Scaleway, KapsuleOptions> for Scaleway {
)) ))
} }
fn kubernetes_nodes() -> Vec<NodeGroups> { fn kubernetes_nodes(min_nodes: i32, max_nodes: i32) -> Vec<NodeGroups> {
// Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L // Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L
vec![NodeGroups::new("groupscw0".to_string(), 5, 10, "dev1-l".to_string(), 0) vec![
.expect("Problem while setup SCW nodes")] NodeGroups::new("groupscw0".to_string(), min_nodes, max_nodes, "dev1-l".to_string(), 0)
.expect("Problem while setup SCW nodes"),
]
} }
fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option<String>) -> KapsuleOptions { fn kubernetes_cluster_options(secrets: FuncTestsSecrets, _cluster_name: Option<String>) -> KapsuleOptions {

View File

@@ -53,3 +53,45 @@ fn create_upgrade_and_destroy_eks_cluster_with_env_in_eu_west_3() {
) )
}) })
} }
#[cfg(feature = "test-aws-whole-enchilada")]
#[named]
#[test]
fn create_resize_and_destroy_eks_cluster_with_env_in_eu_west_3() {
    // Resolve the target region and its availability zones from secrets.
    let secrets = FuncTestsSecrets::new();
    let region = secrets.AWS_DEFAULT_REGION.as_ref().expect("AWS region was not found");
    let aws_region = AwsRegion::from_str(region).expect("Wasn't able to convert the desired region");
    let aws_zones = aws_region.get_zones();

    // Unique identifiers and a dedicated test domain for this run.
    let organization_id = generate_id();
    let cluster_id = generate_cluster_id(aws_region.to_string().as_str());
    let context = context(organization_id.as_str(), cluster_id.as_str());
    let base_domain = secrets
        .DEFAULT_TEST_DOMAIN
        .as_ref()
        .expect("DEFAULT_TEST_DOMAIN is not set in secrets");
    let cluster_domain = format!("{}.{}", cluster_id.as_str(), base_domain.as_str());

    // Run the full create -> resize -> destroy scenario.
    engine_run_test(|| {
        cluster_test(
            function_name!(),
            Kind::Aws,
            context.clone(),
            logger(),
            region,
            Some(aws_zones),
            ClusterTestType::WithNodesResize,
            AWS_KUBERNETES_MAJOR_VERSION,
            AWS_KUBERNETES_MINOR_VERSION,
            &ClusterDomain::Custom(cluster_domain),
            None,
            None,
        )
    })
}