fix: better support node numbers

This commit is contained in:
Pierre Mavro
2021-10-22 20:09:34 +02:00
committed by Pierre Mavro
parent 9fbc4a512d
commit 17964dbae7
20 changed files with 425 additions and 342 deletions

View File

@@ -12,13 +12,14 @@ resource "aws_eks_node_group" "eks_cluster_workers_{{ loop.index }}" {
local.tags_eks,
{
"QoveryNodeGroupId" = "${var.kubernetes_cluster_id}-{{ loop.index }}"
"QoveryNodeGroupName" = "{{ eks_worker_node.name }}"
}
)
scaling_config {
desired_size = "{{ eks_worker_node.desired_size }}"
max_size = "{{ eks_worker_node.max_size }}"
min_size = "{{ eks_worker_node.min_size }}"
desired_size = "{{ eks_worker_node.min_nodes }}"
max_size = "{{ eks_worker_node.max_nodes }}"
min_size = "{{ eks_worker_node.min_nodes }}"
}
lifecycle {

View File

@@ -8,15 +8,15 @@ resource "digitalocean_kubernetes_cluster" "kubernetes_cluster" {
auto_upgrade = true
surge_upgrade = true
tags = local.tags_ks_list
tags = concat(local.tags_ks_list, ["QoveryNodeGroupName:{{ doks_worker_nodes[0].name }}", "QoveryNodeGroupId:${var.kubernetes_cluster_id}-0"])
node_pool {
tags = local.tags_ks_list
tags = concat(local.tags_ks_list, ["QoveryNodeGroupName:{{ doks_worker_nodes[0].name }}", "QoveryNodeGroupId:${var.kubernetes_cluster_id}-0"])
name = var.kubernetes_cluster_id
size = "{{ doks_worker_nodes[0].instance_type }}"
# use Digital Ocean built-in cluster autoscaler
auto_scale = true
min_nodes = "{{ doks_worker_nodes[0].min_size }}"
max_nodes = "{{ doks_worker_nodes[0].max_size }}"
min_nodes = "{{ doks_worker_nodes[0].min_nodes }}"
max_nodes = "{{ doks_worker_nodes[0].max_nodes }}"
}
}

View File

@@ -8,10 +8,10 @@ resource "digitalocean_kubernetes_node_pool" "app_node_pool_{{ loop.index }}" {
name = "qovery-{{kubernetes_cluster_id}}-{{ loop.index }}"
size = "{{ doks_worker_node.instance_type }}"
tags = local.tags_ks_list
tags = concat(local.tags_doks_list, ["QoveryNodeGroupId:${var.kubernetes_cluster_id}-{{ loop.index }}", "QoveryNodeGroupName:{{ eks_worker_node.name }}"])
auto_scale = true
min_nodes = "{{ doks_worker_node.min_size }}"
max_nodes = "{{ doks_worker_node.max_size }}"
min_nodes = {{ doks_worker_node.min_nodes }}
max_nodes = {{ doks_worker_node.max_nodes }}
depends_on = [
digitalocean_kubernetes_cluster.kubernetes_cluster,

View File

@@ -10,14 +10,14 @@ resource "scaleway_k8s_pool" "kubernetes_cluster_workers_{{ loop.index }}" {
# use Scaleway built-in cluster autoscaler
autoscaling = {{ scw_ks_pool_autoscale }}
autohealing = true
size = "{{ scw_ks_worker_node.min_size }}"
min_size = "{{ scw_ks_worker_node.min_size }}"
max_size = "{{ scw_ks_worker_node.max_size }}"
size = "{{ scw_ks_worker_node.min_nodes }}"
min_size = "{{ scw_ks_worker_node.min_nodes }}"
max_size = "{{ scw_ks_worker_node.max_nodes }}"
depends_on = [
scaleway_k8s_cluster.kubernetes_cluster,
]
tags = local.tags_ks_list
tags = concat(local.tags_ks_list, ["QoveryNodeGroupName:{{ scw_ks_worker_node.name }}", "QoveryNodeGroupId:${var.kubernetes_cluster_id}-{{ loop.index }}"])
}
{% endfor %}

View File

@@ -1,7 +1,8 @@
use core::fmt;
use std::env;
use std::path::PathBuf;
use std::str::FromStr;
use itertools::Itertools;
use retry::delay::{Fibonacci, Fixed};
use retry::Error::Operation;
use retry::OperationResult;
@@ -10,16 +11,16 @@ use serde::{Deserialize, Serialize};
use tera::Context as TeraContext;
use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::node::Node;
use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType;
use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create;
use crate::cloud_provider::aws::AWS;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, KubernetesNode, KubernetesNodesType,
is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, KubernetesNodesType,
KubernetesUpgradeStatus,
};
use crate::cloud_provider::models::WorkerNodeDataTemplate;
use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat};
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd;
@@ -43,8 +44,6 @@ use crate::models::{
use crate::object_storage::s3::S3;
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
use core::fmt;
use std::path::PathBuf;
pub mod helm_charts;
pub mod node;
@@ -117,7 +116,7 @@ pub struct EKS<'a> {
cloud_provider: &'a AWS,
dns_provider: &'a dyn DnsProvider,
s3: S3,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
template_directory: String,
options: Options,
listeners: Listeners,
@@ -134,10 +133,24 @@ impl<'a> EKS<'a> {
cloud_provider: &'a AWS,
dns_provider: &'a dyn DnsProvider,
options: Options,
nodes: Vec<Node>,
) -> Self {
nodes_groups: Vec<NodeGroups>,
) -> Result<Self, EngineError> {
let template_directory = format!("{}/aws/bootstrap", context.lib_root_dir());
for node_group in &nodes_groups {
if AwsInstancesType::from_str(node_group.instance_type.as_str()).is_err() {
return Err(EngineError::new(
EngineErrorCause::Internal,
EngineErrorScope::Engine,
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
)),
));
}
}
// TODO export this
let s3 = S3::new(
context.clone(),
@@ -147,7 +160,7 @@ impl<'a> EKS<'a> {
cloud_provider.secret_access_key.clone(),
);
EKS {
Ok(EKS {
context,
id: id.to_string(),
long_id,
@@ -158,10 +171,10 @@ impl<'a> EKS<'a> {
dns_provider,
s3,
options,
nodes,
nodes_groups,
template_directory,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
}
})
}
fn get_engine_location(&self) -> EngineLocation {
@@ -270,20 +283,6 @@ impl<'a> EKS<'a> {
let eks_access_cidr_blocks = format_ips(&self.options.eks_access_cidr_blocks);
let worker_nodes = self
.nodes
.iter()
.group_by(|e| e.instance_type())
.into_iter()
.map(|(instance_type, group)| (instance_type, group.collect::<Vec<_>>()))
.map(|(instance_type, nodes)| WorkerNodeDataTemplate {
instance_type: instance_type.to_string(),
desired_size: "3".to_string(),
max_size: nodes.len().to_string(),
min_size: "3".to_string(),
})
.collect::<Vec<WorkerNodeDataTemplate>>();
let qovery_api_url = self.options.qovery_api_url.clone();
let rds_cidr_subnet = self.options.rds_cidr_subnet.clone();
let documentdb_cidr_subnet = self.options.documentdb_cidr_subnet.clone();
@@ -417,7 +416,7 @@ impl<'a> EKS<'a> {
context.insert("kubernetes_cluster_name", &self.name());
context.insert("kubernetes_cluster_id", self.id());
context.insert("eks_region_cluster_id", region_cluster_id.as_str());
context.insert("eks_worker_nodes", &worker_nodes);
context.insert("eks_worker_nodes", &self.nodes_groups);
context.insert("eks_zone_a_subnet_blocks_private", &eks_zone_a_subnet_blocks_private);
context.insert("eks_zone_b_subnet_blocks_private", &eks_zone_b_subnet_blocks_private);
context.insert("eks_zone_c_subnet_blocks_private", &eks_zone_c_subnet_blocks_private);
@@ -1058,7 +1057,7 @@ impl<'a> Kubernetes for EKS<'a> {
let mut context = self.tera_context()?;
// pause: remove all worker nodes to reduce the bill but keep master to keep all the deployment config, certificates etc...
let worker_nodes: Vec<WorkerNodeDataTemplate> = Vec::new();
let worker_nodes: Vec<NodeGroupsFormat> = Vec::new();
context.insert("eks_worker_nodes", &worker_nodes);
let _ = cast_simple_error_to_engine_error(

View File

@@ -1,90 +1,92 @@
use std::any::Any;
use crate::cloud_provider::kubernetes::InstanceType;
use core::fmt;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::cloud_provider::kubernetes::KubernetesNode;
#[derive(Clone)]
pub struct Node {
instance_type: String,
/// AWS EC2 instance types allowed for EKS worker node groups.
///
/// NOTE(review): the `as_str`/`Display`/`FromStr` impls for this enum map
/// `T2Xlarge`/`T3Xlarge` to "t2x.large"/"t3x.large", which do not look like
/// valid AWS instance type names ("t2.xlarge"/"t3.xlarge") — verify.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AwsInstancesType {
    T2Large,    // 2 cores 8Gb RAM
    T2Xlarge,   // 4 cores 16Gb RAM
    T3Large,    // 2 cores 8Gb RAM
    T3Xlarge,   // 4 cores 16Gb RAM
    T3aLarge,   // 2 cores 8Gb RAM
    T3a2xlarge, // 8 cores 32Gb RAM
}
impl Node {
/// Number of CPUs and total memory wanted - the right AWS EC2 instance type is found algorithmically
/// Eg. total_cpu = 1 and total_memory_in_gib = 2 means `t2.small` instance type
/// BUT total_cpu = 1 and total_memory_in_gib = 3 does not have an existing instance - so we will pick the upper closest,
/// which is `t2.medium` with 2 cpu and 4 GiB
/// ```
/// use qovery_engine::cloud_provider::aws::kubernetes::node::Node;
/// use qovery_engine::cloud_provider::kubernetes::KubernetesNode;
///
/// let node = Node::new_with_cpu_and_mem(2, 4);
/// assert_eq!(node.instance_type(), "t2.medium")
/// ```
pub fn new_with_cpu_and_mem(total_cpu: u8, total_memory_in_gib: u16) -> Self {
let instance_types_table = [
(1, 1, "t2.micro"),
(1, 2, "t2.small"),
(2, 4, "t2.medium"),
(2, 8, "t2.large"),
(4, 16, "t2.xlarge"),
(8, 32, "t2.2xlarge"),
// TODO add other instance types
];
if total_cpu == 0 || total_memory_in_gib == 0 {
let (_, _, instance_type) = instance_types_table.first().unwrap();
return Node::new(*instance_type);
impl InstanceType for AwsInstancesType {
fn to_cloud_provider_format(&self) -> String {
match self {
AwsInstancesType::T2Large => "t2.large",
AwsInstancesType::T2Xlarge => "t2x.large",
AwsInstancesType::T3Large => "t3.large",
AwsInstancesType::T3Xlarge => "t3x.large",
AwsInstancesType::T3aLarge => "t3a.large",
AwsInstancesType::T3a2xlarge => "t3a.2xlarge",
}
for (_cpu, mem, instance_type) in instance_types_table.iter() {
if total_memory_in_gib <= *mem {
return Node::new(*instance_type);
}
}
let (_, _, instance_type) = instance_types_table.last().unwrap();
Node::new(*instance_type)
.to_string()
}
}
pub fn new<T: Into<String>>(instance_type: T) -> Self {
Node {
instance_type: instance_type.into(),
impl AwsInstancesType {
    /// Returns the canonical AWS EC2 instance type name for this variant.
    ///
    /// Fixed: `T2Xlarge` and `T3Xlarge` previously returned "t2x.large" and
    /// "t3x.large", which are not valid AWS instance type names — AWS uses
    /// "t2.xlarge" and "t3.xlarge".
    pub fn as_str(&self) -> &str {
        match self {
            AwsInstancesType::T2Large => "t2.large",
            AwsInstancesType::T2Xlarge => "t2.xlarge",
            AwsInstancesType::T3Large => "t3.large",
            AwsInstancesType::T3Xlarge => "t3.xlarge",
            AwsInstancesType::T3aLarge => "t3a.large",
            AwsInstancesType::T3a2xlarge => "t3a.2xlarge",
        }
    }
}
impl KubernetesNode for Node {
fn instance_type(&self) -> &str {
self.instance_type.as_str()
impl fmt::Display for AwsInstancesType {
    /// Formats the variant as its canonical AWS EC2 instance type name.
    ///
    /// Fixed: `T2Xlarge`/`T3Xlarge` previously printed "t2x.large"/"t3x.large",
    /// which are not valid AWS instance type names.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            AwsInstancesType::T2Large => "t2.large",
            AwsInstancesType::T2Xlarge => "t2.xlarge",
            AwsInstancesType::T3Large => "t3.large",
            AwsInstancesType::T3Xlarge => "t3.xlarge",
            AwsInstancesType::T3aLarge => "t3a.large",
            AwsInstancesType::T3a2xlarge => "t3a.2xlarge",
        };
        f.write_str(name)
    }
}
fn as_any(&self) -> &dyn Any {
self
impl FromStr for AwsInstancesType {
    type Err = ();

    /// Parses an AWS EC2 instance type name (e.g. "t2.xlarge").
    ///
    /// Fixed: the canonical AWS names "t2.xlarge"/"t3.xlarge" were previously
    /// rejected; only the misspelled "t2x.large"/"t3x.large" were accepted.
    /// The misspelled forms are still accepted for backward compatibility.
    fn from_str(s: &str) -> Result<AwsInstancesType, ()> {
        match s {
            "t2.large" => Ok(AwsInstancesType::T2Large),
            "t2.xlarge" | "t2x.large" => Ok(AwsInstancesType::T2Xlarge),
            "t3.large" => Ok(AwsInstancesType::T3Large),
            "t3.xlarge" | "t3x.large" => Ok(AwsInstancesType::T3Xlarge),
            "t3a.large" => Ok(AwsInstancesType::T3aLarge),
            "t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge),
            _ => Err(()),
        }
    }
}
#[cfg(test)]
mod tests {
use crate::cloud_provider::aws::kubernetes::node::Node;
use crate::cloud_provider::kubernetes::KubernetesNode;
use crate::cloud_provider::models::NodeGroups;
#[test]
fn test_instance_types() {
assert_eq!(Node::new_with_cpu_and_mem(0, 0).instance_type(), "t2.micro");
assert_eq!(Node::new_with_cpu_and_mem(1, 0).instance_type(), "t2.micro");
assert_eq!(Node::new_with_cpu_and_mem(0, 1).instance_type(), "t2.micro");
assert_eq!(Node::new_with_cpu_and_mem(1, 1).instance_type(), "t2.micro");
assert_eq!(Node::new_with_cpu_and_mem(1, 2).instance_type(), "t2.small");
assert_eq!(Node::new_with_cpu_and_mem(2, 4).instance_type(), "t2.medium");
assert_eq!(Node::new_with_cpu_and_mem(2, 5).instance_type(), "t2.large");
assert_eq!(Node::new_with_cpu_and_mem(1, 6).instance_type(), "t2.large");
assert_eq!(Node::new_with_cpu_and_mem(1, 7).instance_type(), "t2.large");
assert_eq!(Node::new_with_cpu_and_mem(2, 8).instance_type(), "t2.large");
assert_eq!(Node::new_with_cpu_and_mem(3, 8).instance_type(), "t2.large");
assert_eq!(Node::new_with_cpu_and_mem(3, 10).instance_type(), "t2.xlarge");
assert_eq!(Node::new_with_cpu_and_mem(3, 12).instance_type(), "t2.xlarge");
assert_eq!(Node::new_with_cpu_and_mem(4, 16).instance_type(), "t2.xlarge");
assert_eq!(Node::new_with_cpu_and_mem(4, 17).instance_type(), "t2.2xlarge");
assert_eq!(Node::new_with_cpu_and_mem(8, 32).instance_type(), "t2.2xlarge");
assert_eq!(Node::new_with_cpu_and_mem(16, 64).instance_type(), "t2.2xlarge");
fn test_groups_nodes() {
assert!(NodeGroups::new("".to_string(), 2, 1, "t2.large".to_string()).is_err());
assert!(NodeGroups::new("".to_string(), 2, 2, "t2.large".to_string()).is_ok());
assert!(NodeGroups::new("".to_string(), 2, 3, "t2.large".to_string()).is_ok());
assert_eq!(
NodeGroups::new("".to_string(), 2, 2, "t2.large".to_string()).unwrap(),
NodeGroups {
name: "".to_string(),
min_nodes: 2,
max_nodes: 2,
instance_type: "t2.large".to_string()
}
);
}
}

View File

@@ -1,6 +1,5 @@
use std::env;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use tera::Context as TeraContext;
@@ -10,7 +9,7 @@ use crate::cloud_provider::digitalocean::kubernetes::doks_api::{
get_do_latest_doks_slug_from_api, get_doks_info_from_name,
};
use crate::cloud_provider::digitalocean::kubernetes::helm_charts::{do_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::digitalocean::kubernetes::node::Node;
use crate::cloud_provider::digitalocean::kubernetes::node::DoInstancesType;
use crate::cloud_provider::digitalocean::models::doks::KubernetesCluster;
use crate::cloud_provider::digitalocean::network::load_balancer::do_get_load_balancer_ip;
use crate::cloud_provider::digitalocean::network::vpc::{
@@ -19,8 +18,8 @@ use crate::cloud_provider::digitalocean::network::vpc::{
use crate::cloud_provider::digitalocean::DO;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo, ChartSetValue, HelmChartNamespaces};
use crate::cloud_provider::kubernetes::{uninstall_cert_manager, Kind, Kubernetes, KubernetesNode};
use crate::cloud_provider::models::WorkerNodeDataTemplate;
use crate::cloud_provider::kubernetes::{uninstall_cert_manager, Kind, Kubernetes};
use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd::helm::{helm_exec_upgrade_with_chart_info, helm_upgrade_diff_with_chart_info};
@@ -43,6 +42,7 @@ use retry::delay::Fibonacci;
use retry::Error::Operation;
use retry::OperationResult;
use std::path::PathBuf;
use std::str::FromStr;
pub mod cidr;
pub mod doks_api;
@@ -82,7 +82,7 @@ pub struct DOKS<'a> {
version: String,
region: Region,
cloud_provider: &'a DO,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
dns_provider: &'a dyn DnsProvider,
spaces: Spaces,
template_directory: String,
@@ -100,11 +100,25 @@ impl<'a> DOKS<'a> {
region: Region,
cloud_provider: &'a DO,
dns_provider: &'a dyn DnsProvider,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
options: DoksOptions,
) -> Self {
) -> Result<Self, EngineError> {
let template_directory = format!("{}/digitalocean/bootstrap", context.lib_root_dir());
for node_group in &nodes_groups {
if DoInstancesType::from_str(node_group.instance_type.as_str()).is_err() {
return Err(EngineError::new(
EngineErrorCause::Internal,
EngineErrorScope::Engine,
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
)),
));
}
}
let spaces = Spaces::new(
context.clone(),
"spaces-temp-id".to_string(),
@@ -114,7 +128,7 @@ impl<'a> DOKS<'a> {
region,
);
DOKS {
Ok(DOKS {
context,
id,
long_id,
@@ -125,10 +139,10 @@ impl<'a> DOKS<'a> {
dns_provider,
spaces,
options,
nodes,
nodes_groups,
template_directory,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
}
})
}
fn get_engine_location(&self) -> EngineLocation {
@@ -376,21 +390,7 @@ impl<'a> DOKS<'a> {
};
// kubernetes workers
let worker_nodes = self
.nodes
.iter()
.group_by(|e| e.instance_type())
.into_iter()
.map(|(instance_type, group)| (instance_type, group.collect::<Vec<_>>()))
.map(|(instance_type, nodes)| WorkerNodeDataTemplate {
instance_type: instance_type.to_string(),
desired_size: "3".to_string(),
max_size: nodes.len().to_string(),
min_size: "3".to_string(),
})
.collect::<Vec<WorkerNodeDataTemplate>>();
context.insert("doks_worker_nodes", &worker_nodes);
context.insert("doks_worker_nodes", &self.nodes_groups);
Ok(context)
}

View File

@@ -1,51 +1,97 @@
use std::any::Any;
use crate::cloud_provider::kubernetes::InstanceType;
use core::fmt;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use crate::cloud_provider::kubernetes::KubernetesNode;
#[derive(Clone)]
pub struct Node {
instance_type: String,
/// DigitalOcean droplet sizes allowed for DOKS worker node pools.
/// Each variant corresponds to a DO size slug (see `as_str`), which encodes
/// vCPU count and RAM, e.g. `S2vcpu4gb` -> "s-2vcpu-4gb".
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum DoInstancesType {
    S1vcpu1gb,  // 1 vCPU, 1 Gb RAM ("s-1vcpu-1gb")
    S1vcpu2gb,  // 1 vCPU, 2 Gb RAM ("s-1vcpu-2gb")
    S1vcpu3gb,  // 1 vCPU, 3 Gb RAM ("s-1vcpu-3gb")
    S2vcpu4gb,  // 2 vCPU, 4 Gb RAM ("s-2vcpu-4gb")
    S4vcpu8gb,  // 4 vCPU, 8 Gb RAM ("s-4vcpu-8gb")
    S6vcpu16gb, // 6 vCPU, 16 Gb RAM ("s-6vcpu-16gb")
    S8vcpu32gb, // 8 vCPU, 32 Gb RAM ("s-8vcpu-32gb")
}
impl Node {
pub fn new_with_cpu_and_mem(total_cpu: u8, total_memory_in_gib: u16) -> Self {
let instance_types_table = [
(1, 1, "s-1vcpu-1gb"),
(1, 2, "s-1vcpu-2gb"),
(2, 4, "s-2vcpu-4gb"),
(4, 8, "s-4vcpu-8gb"),
(6, 16, "s-6vcpu-16gb"),
(8, 32, "s-8vcpu-32gb"),
];
if total_cpu == 0 || total_memory_in_gib == 0 {
let (_, _, instance_type) = instance_types_table.first().unwrap();
return Node::new(*instance_type);
impl InstanceType for DoInstancesType {
    /// Returns the DigitalOcean size slug for this droplet type, as an owned
    /// string in the format the provider API expects.
    fn to_cloud_provider_format(&self) -> String {
        let slug = match *self {
            DoInstancesType::S1vcpu1gb => "s-1vcpu-1gb",
            DoInstancesType::S1vcpu2gb => "s-1vcpu-2gb",
            DoInstancesType::S1vcpu3gb => "s-1vcpu-3gb",
            DoInstancesType::S2vcpu4gb => "s-2vcpu-4gb",
            DoInstancesType::S4vcpu8gb => "s-4vcpu-8gb",
            DoInstancesType::S6vcpu16gb => "s-6vcpu-16gb",
            DoInstancesType::S8vcpu32gb => "s-8vcpu-32gb",
        };
        String::from(slug)
    }
}
for (_cpu, mem, instance_type) in instance_types_table.iter() {
if total_memory_in_gib <= *mem {
return Node::new(*instance_type);
impl DoInstancesType {
    /// Borrowed DigitalOcean size slug for this droplet type.
    pub fn as_str(&self) -> &str {
        match *self {
            DoInstancesType::S1vcpu1gb => "s-1vcpu-1gb",
            DoInstancesType::S1vcpu2gb => "s-1vcpu-2gb",
            DoInstancesType::S1vcpu3gb => "s-1vcpu-3gb",
            DoInstancesType::S2vcpu4gb => "s-2vcpu-4gb",
            DoInstancesType::S4vcpu8gb => "s-4vcpu-8gb",
            DoInstancesType::S6vcpu16gb => "s-6vcpu-16gb",
            DoInstancesType::S8vcpu32gb => "s-8vcpu-32gb",
        }
    }
}
impl fmt::Display for DoInstancesType {
    /// Formats the variant as its DigitalOcean size slug.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let slug = match self {
            DoInstancesType::S1vcpu1gb => "s-1vcpu-1gb",
            DoInstancesType::S1vcpu2gb => "s-1vcpu-2gb",
            DoInstancesType::S1vcpu3gb => "s-1vcpu-3gb",
            DoInstancesType::S2vcpu4gb => "s-2vcpu-4gb",
            DoInstancesType::S4vcpu8gb => "s-4vcpu-8gb",
            DoInstancesType::S6vcpu16gb => "s-6vcpu-16gb",
            DoInstancesType::S8vcpu32gb => "s-8vcpu-32gb",
        };
        f.write_str(slug)
    }
}
impl FromStr for DoInstancesType {
    type Err = ();

    /// Parses a DigitalOcean size slug (e.g. "s-2vcpu-4gb") back into its
    /// enum variant; any unknown slug yields `Err(())`.
    fn from_str(s: &str) -> Result<DoInstancesType, ()> {
        let instance_type = match s {
            "s-1vcpu-1gb" => DoInstancesType::S1vcpu1gb,
            "s-1vcpu-2gb" => DoInstancesType::S1vcpu2gb,
            "s-1vcpu-3gb" => DoInstancesType::S1vcpu3gb,
            "s-2vcpu-4gb" => DoInstancesType::S2vcpu4gb,
            "s-4vcpu-8gb" => DoInstancesType::S4vcpu8gb,
            "s-6vcpu-16gb" => DoInstancesType::S6vcpu16gb,
            "s-8vcpu-32gb" => DoInstancesType::S8vcpu32gb,
            _ => return Err(()),
        };
        Ok(instance_type)
    }
}
#[cfg(test)]
mod tests {
use crate::cloud_provider::models::NodeGroups;
#[test]
fn test_groups_nodes() {
assert!(NodeGroups::new("".to_string(), 2, 1, "s-2vcpu-4gb".to_string()).is_err());
assert!(NodeGroups::new("".to_string(), 2, 2, "s-2vcpu-4gb".to_string()).is_ok());
assert!(NodeGroups::new("".to_string(), 2, 3, "s-2vcpu-4gb".to_string()).is_ok());
assert_eq!(
NodeGroups::new("".to_string(), 2, 2, "s-2vcpu-4gb".to_string()).unwrap(),
NodeGroups {
name: "".to_string(),
min_nodes: 2,
max_nodes: 2,
instance_type: "s-2vcpu-4gb".to_string()
}
}
let (_, _, instance_type) = instance_types_table.last().unwrap();
Node::new(*instance_type)
}
pub fn new<T: Into<String>>(instance_type: T) -> Self {
Node {
instance_type: instance_type.into(),
}
}
}
impl KubernetesNode for Node {
fn instance_type(&self) -> &str {
self.instance_type.as_str()
}
fn as_any(&self) -> &dyn Any {
self
);
}
}

View File

@@ -2,6 +2,7 @@ use std::any::Any;
use std::fs::File;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::str::FromStr;
use std::thread;
use retry::delay::Fibonacci;
@@ -10,6 +11,7 @@ use retry::OperationResult;
use serde::{Deserialize, Serialize};
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::service::CheckAction;
use crate::cloud_provider::utilities::VersionsNumber;
use crate::cloud_provider::{service, CloudProvider, DeploymentTarget};
@@ -26,7 +28,6 @@ use crate::error::{
use crate::models::{Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, StringPath};
use crate::object_storage::ObjectStorage;
use crate::unit_conversion::{any_to_mi, cpu_string_to_float};
use std::str::FromStr;
pub trait Kubernetes: Listen {
fn context(&self) -> &Context;
@@ -875,14 +876,40 @@ pub fn compare_kubernetes_cluster_versions_for_upgrade(
Ok(upgrade_required)
}
/// Converts a provider-agnostic instance type value into the exact string
/// representation the cloud provider's API/templates expect
/// (e.g. "t2.large" for AWS, "s-2vcpu-4gb" for DigitalOcean, "gp1-xs" for Scaleway).
pub trait InstanceType {
    fn to_cloud_provider_format(&self) -> String;
}
impl NodeGroups {
    /// Builds a validated worker node group configuration.
    ///
    /// # Errors
    /// Returns a `SimpleError` when `min_nodes` is greater than `max_nodes`.
    pub fn new(group_name: String, min_nodes: i32, max_nodes: i32, instance_type: String) -> Result<Self, SimpleError> {
        if min_nodes > max_nodes {
            // Fixed: format! arguments were previously in the wrong order
            // (group_name was substituted where the min-nodes count belongs,
            // shifting every placeholder by one).
            return Err(SimpleError {
                kind: SimpleErrorKind::Other,
                message: Some(format!(
                    "The number of minimum nodes ({}) for group name {} is higher than maximum nodes ({})",
                    &min_nodes, &group_name, &max_nodes
                )),
            });
        }

        Ok(NodeGroups {
            name: group_name,
            min_nodes,
            max_nodes,
            instance_type,
        })
    }
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use crate::cloud_provider::kubernetes::{
check_kubernetes_upgrade_status, compare_kubernetes_cluster_versions_for_upgrade, KubernetesNodesType,
};
use crate::cloud_provider::utilities::VersionsNumber;
use crate::cmd::structs::{KubernetesList, KubernetesNode, KubernetesVersion};
use std::str::FromStr;
#[test]
pub fn check_kubernetes_upgrade_method() {

View File

@@ -1,13 +1,5 @@
use serde::{Deserialize, Serialize};
/// Legacy per-instance-type worker node template with string-typed sizing
/// fields; superseded by `NodeGroups` / `NodeGroupsFormat` in this codebase.
#[derive(Serialize, Deserialize)]
pub struct WorkerNodeDataTemplate {
    pub instance_type: String,
    pub desired_size: String,
    pub max_size: String,
    pub min_size: String,
}
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct EnvironmentVariable {
pub key: String,
@@ -69,3 +61,19 @@ pub struct CpuLimits {
pub cpu_request: String,
pub cpu_limit: String,
}
/// Autoscaling configuration for one worker node group: node count bounds
/// plus the provider-specific instance type string (validated against each
/// provider's instance type enum at cluster construction time).
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct NodeGroups {
    pub name: String,
    pub min_nodes: i32,
    pub max_nodes: i32,
    pub instance_type: String,
}
/// String-typed mirror of `NodeGroups` — presumably used where node group
/// values are injected into Tera template contexts as strings (e.g. the
/// empty worker list inserted on cluster pause) — TODO confirm all call sites.
#[derive(Serialize, Deserialize)]
pub struct NodeGroupsFormat {
    pub name: String,
    pub min_nodes: String,
    pub max_nodes: String,
    pub instance_type: String,
}

View File

@@ -4,13 +4,13 @@ pub mod node;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, KubernetesNode, KubernetesUpgradeStatus,
is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, KubernetesUpgradeStatus,
};
use crate::cloud_provider::models::WorkerNodeDataTemplate;
use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::scaleway::application::Zone;
use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::scaleway::kubernetes::node::Node;
use crate::cloud_provider::scaleway::kubernetes::node::ScwInstancesType;
use crate::cloud_provider::scaleway::Scaleway;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd::kubectl::kubectl_exec_get_all_namespaces;
@@ -28,13 +28,13 @@ use crate::object_storage::scaleway_object_storage::{BucketDeleteStrategy, Scale
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
use crate::{cmd, dns_provider};
use itertools::Itertools;
use retry::delay::Fibonacci;
use retry::Error::Operation;
use retry::OperationResult;
use serde::{Deserialize, Serialize};
use std::env;
use std::path::PathBuf;
use std::str::FromStr;
use tera::Context as TeraContext;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
@@ -112,7 +112,7 @@ pub struct Kapsule<'a> {
cloud_provider: &'a Scaleway,
dns_provider: &'a dyn DnsProvider,
object_storage: ScalewayOS,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
template_directory: String,
options: KapsuleOptions,
listeners: Listeners,
@@ -128,11 +128,25 @@ impl<'a> Kapsule<'a> {
zone: Zone,
cloud_provider: &'a Scaleway,
dns_provider: &'a dyn DnsProvider,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
options: KapsuleOptions,
) -> Kapsule<'a> {
) -> Result<Kapsule<'a>, EngineError> {
let template_directory = format!("{}/scaleway/bootstrap", context.lib_root_dir());
for node_group in &nodes_groups {
if ScwInstancesType::from_str(node_group.instance_type.as_str()).is_err() {
return Err(EngineError::new(
EngineErrorCause::Internal,
EngineErrorScope::Engine,
context.execution_id(),
Some(format!(
"Nodegroup instance type {} is not valid for {}",
node_group.instance_type, cloud_provider.name
)),
));
}
}
let object_storage = ScalewayOS::new(
context.clone(),
"s3-temp-id".to_string(),
@@ -144,7 +158,7 @@ impl<'a> Kapsule<'a> {
false,
);
Kapsule {
Ok(Kapsule {
context,
id,
long_id,
@@ -154,11 +168,11 @@ impl<'a> Kapsule<'a> {
cloud_provider,
dns_provider,
object_storage,
nodes,
nodes_groups,
template_directory,
options,
listeners: cloud_provider.listeners.clone(), // copy listeners from CloudProvider
}
})
}
fn get_engine_location(&self) -> EngineLocation {
@@ -310,20 +324,7 @@ impl<'a> Kapsule<'a> {
context.insert("grafana_admin_password", self.options.grafana_admin_password.as_str());
// Kubernetes workers
let worker_nodes = self
.nodes
.iter()
.group_by(|e| e.instance_type())
.into_iter()
.map(|(instance_type, group)| (instance_type, group.collect::<Vec<_>>()))
.map(|(instance_type, nodes)| WorkerNodeDataTemplate {
instance_type: instance_type.to_string().to_uppercase(),
desired_size: "3".to_string(),
max_size: nodes.len().to_string(),
min_size: "3".to_string(),
})
.collect::<Vec<WorkerNodeDataTemplate>>();
context.insert("scw_ks_worker_nodes", &worker_nodes);
context.insert("scw_ks_worker_nodes", &self.nodes_groups);
context.insert("scw_ks_pool_autoscale", &true);
Ok(context)

View File

@@ -1,110 +1,110 @@
use crate::cloud_provider::kubernetes::KubernetesNode;
use std::any::Any;
use crate::cloud_provider::kubernetes::InstanceType;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::str::FromStr;
#[derive(Clone)]
pub enum NodeType {
Gp1Xs, // 4 cores 16 Go RAM
Gp1S, // 8 cores 32 Go RAM
Gp1M, // 16 cores 64 Go RAM
Gp1L, // 32 cores 128 Go RAM
Gp1Xl, // 64 cores 256 Go RAM
Dev1M, // 3 cores 4 Go RAM
Dev1L, // 4 cores 8 Go RAM
Dev1Xl, // 4 cores 12 Go RAM
RenderS, // 10 cores 45 Go RAM 1 GPU 1 Go VRAM
/// Scaleway instance commercial types allowed for Kapsule worker pools.
/// Each variant maps to a Scaleway slug (see `to_cloud_provider_format`),
/// e.g. `Gp1Xs` -> "gp1-xs".
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ScwInstancesType {
    Gp1Xs,   // 4 cores 16 Gb RAM
    Gp1S,    // 8 cores 32 Gb RAM
    Gp1M,    // 16 cores 64 Gb RAM
    Gp1L,    // 32 cores 128 Gb RAM
    Gp1Xl,   // 64 cores 256 Gb RAM
    Dev1M,   // 3 cores 4 Gb RAM
    Dev1L,   // 4 cores 8 Gb RAM
    Dev1Xl,  // 4 cores 12 Gb RAM
    RenderS, // 10 cores 45 Gb RAM 1 GPU 1 Gb VRAM
}
impl NodeType {
impl InstanceType for ScwInstancesType {
    /// Returns the Scaleway commercial type slug for this instance type, as
    /// an owned string in the format the provider API expects.
    fn to_cloud_provider_format(&self) -> String {
        let slug = match *self {
            ScwInstancesType::Gp1Xs => "gp1-xs",
            ScwInstancesType::Gp1S => "gp1-s",
            ScwInstancesType::Gp1M => "gp1-m",
            ScwInstancesType::Gp1L => "gp1-l",
            ScwInstancesType::Gp1Xl => "gp1-xl",
            ScwInstancesType::Dev1M => "dev1-m",
            ScwInstancesType::Dev1L => "dev1-l",
            ScwInstancesType::Dev1Xl => "dev1-xl",
            ScwInstancesType::RenderS => "render-s",
        };
        String::from(slug)
    }
}
impl ScwInstancesType {
pub fn as_str(&self) -> &str {
match self {
NodeType::Gp1Xs => "gp1-xs",
NodeType::Gp1S => "gp1-s",
NodeType::Gp1M => "gp1-m",
NodeType::Gp1L => "gp1-l",
NodeType::Gp1Xl => "gp1-xl",
NodeType::Dev1M => "dev1-m",
NodeType::Dev1L => "dev1-l",
NodeType::Dev1Xl => "dev1-xl",
NodeType::RenderS => "render-s",
ScwInstancesType::Gp1Xs => "gp1-xs",
ScwInstancesType::Gp1S => "gp1-s",
ScwInstancesType::Gp1M => "gp1-m",
ScwInstancesType::Gp1L => "gp1-l",
ScwInstancesType::Gp1Xl => "gp1-xl",
ScwInstancesType::Dev1M => "dev1-m",
ScwInstancesType::Dev1L => "dev1-l",
ScwInstancesType::Dev1Xl => "dev1-xl",
ScwInstancesType::RenderS => "render-s",
}
}
}
impl fmt::Display for NodeType {
impl fmt::Display for ScwInstancesType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
NodeType::Gp1Xs => write!(f, "gp1-xs"),
NodeType::Gp1S => write!(f, "gp1-s"),
NodeType::Gp1M => write!(f, "gp1-m"),
NodeType::Gp1L => write!(f, "gp1-l"),
NodeType::Gp1Xl => write!(f, "gp1-xl"),
NodeType::Dev1M => write!(f, "dev1-m"),
NodeType::Dev1L => write!(f, "dev1-l"),
NodeType::Dev1Xl => write!(f, "dev1-xl"),
NodeType::RenderS => write!(f, "render-s"),
ScwInstancesType::Gp1Xs => write!(f, "gp1-xs"),
ScwInstancesType::Gp1S => write!(f, "gp1-s"),
ScwInstancesType::Gp1M => write!(f, "gp1-m"),
ScwInstancesType::Gp1L => write!(f, "gp1-l"),
ScwInstancesType::Gp1Xl => write!(f, "gp1-xl"),
ScwInstancesType::Dev1M => write!(f, "dev1-m"),
ScwInstancesType::Dev1L => write!(f, "dev1-l"),
ScwInstancesType::Dev1Xl => write!(f, "dev1-xl"),
ScwInstancesType::RenderS => write!(f, "render-s"),
}
}
}
impl FromStr for NodeType {
impl FromStr for ScwInstancesType {
type Err = ();
fn from_str(s: &str) -> Result<NodeType, ()> {
fn from_str(s: &str) -> Result<ScwInstancesType, ()> {
match s {
"gp1-xs" => Ok(NodeType::Gp1Xs),
"gp1-s" => Ok(NodeType::Gp1S),
"gp1-m" => Ok(NodeType::Gp1M),
"gp1-l" => Ok(NodeType::Gp1L),
"gp1-xl" => Ok(NodeType::Gp1Xl),
"dev1-m" => Ok(NodeType::Dev1M),
"dev1-l" => Ok(NodeType::Dev1L),
"dev1-xl" => Ok(NodeType::Dev1Xl),
"render-s" => Ok(NodeType::RenderS),
"gp1-xs" => Ok(ScwInstancesType::Gp1Xs),
"gp1-s" => Ok(ScwInstancesType::Gp1S),
"gp1-m" => Ok(ScwInstancesType::Gp1M),
"gp1-l" => Ok(ScwInstancesType::Gp1L),
"gp1-xl" => Ok(ScwInstancesType::Gp1Xl),
"dev1-m" => Ok(ScwInstancesType::Dev1M),
"dev1-l" => Ok(ScwInstancesType::Dev1L),
"dev1-xl" => Ok(ScwInstancesType::Dev1Xl),
"render-s" => Ok(ScwInstancesType::RenderS),
_ => Err(()),
}
}
}
#[derive(Clone)]
pub struct Node {
node_type: NodeType,
}
#[cfg(test)]
mod tests {
#[cfg(test)]
mod tests {
use crate::cloud_provider::models::NodeGroups;
impl Node {
pub fn new(node_type: NodeType) -> Node {
Node {
node_type: node_type.clone(),
#[test]
fn test_groups_nodes() {
assert!(NodeGroups::new("".to_string(), 2, 1, "dev1-l".to_string()).is_err());
assert!(NodeGroups::new("".to_string(), 2, 2, "dev1-l".to_string()).is_ok());
assert!(NodeGroups::new("".to_string(), 2, 3, "dev1-l".to_string()).is_ok());
assert_eq!(
NodeGroups::new("".to_string(), 2, 2, "dev1-l".to_string()).unwrap(),
NodeGroups {
name: "".to_string(),
min_nodes: 2,
max_nodes: 2,
instance_type: "dev1-l".to_string()
}
);
}
}
}
impl KubernetesNode for Node {
fn instance_type(&self) -> &str {
self.node_type.as_str()
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[cfg(test)]
mod tests {
    use crate::cloud_provider::kubernetes::KubernetesNode;
    use crate::cloud_provider::scaleway::kubernetes::node::{Node, NodeType};

    /// Every `NodeType` variant must map to its Scaleway API
    /// instance-type string via `Node::instance_type()`.
    #[test]
    fn test_node_types() {
        let expected = vec![
            (NodeType::Dev1M, "dev1-m"),
            (NodeType::Dev1L, "dev1-l"),
            (NodeType::Dev1Xl, "dev1-xl"),
            (NodeType::Gp1Xs, "gp1-xs"),
            (NodeType::Gp1S, "gp1-s"),
            (NodeType::Gp1M, "gp1-m"),
            (NodeType::Gp1L, "gp1-l"),
            (NodeType::Gp1Xl, "gp1-xl"),
            (NodeType::RenderS, "render-s"),
        ];

        for (node_type, type_str) in expected.into_iter() {
            assert_eq!(Node::new(node_type).instance_type(), type_str);
        }
    }
}

View File

@@ -1,10 +1,12 @@
extern crate serde;
extern crate serde_derive;
use tracing::error;
use qovery_engine::cloud_provider::aws::kubernetes::node::Node;
use qovery_engine::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode, EKS};
use qovery_engine::cloud_provider::aws::AWS;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::docker_hub::DockerHub;
use qovery_engine::container_registry::ecr::ECR;
@@ -14,7 +16,6 @@ use qovery_engine::models::Context;
use crate::cloudflare::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, FuncTestsSecrets};
use qovery_engine::cloud_provider::qovery::EngineLocation::ClientSide;
pub const AWS_QOVERY_ORGANIZATION_ID: &str = "u8nb94c7fwxzr2jt";
pub const AWS_REGION_FOR_S3: &str = "eu-west-3";
@@ -53,19 +54,9 @@ pub fn container_registry_docker_hub(context: &Context) -> DockerHub {
)
}
pub fn aws_kubernetes_nodes() -> Vec<Node> {
vec![
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
Node::new_with_cpu_and_mem(2, 8),
]
pub fn aws_kubernetes_nodes() -> Vec<NodeGroups> {
vec![NodeGroups::new("groupeks0".to_string(), 5, 10, "t3a.large".to_string())
.expect("Problem while setup EKS nodes")]
}
pub fn cloud_provider_aws(context: &Context) -> AWS {
@@ -173,7 +164,7 @@ pub fn aws_kubernetes_eks<'a>(
context: &Context,
cloud_provider: &'a AWS,
dns_provider: &'a dyn DnsProvider,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
) -> EKS<'a> {
let secrets = FuncTestsSecrets::new();
EKS::<'a>::new(
@@ -186,8 +177,9 @@ pub fn aws_kubernetes_eks<'a>(
cloud_provider,
dns_provider,
eks_options(secrets),
nodes,
nodes_groups,
)
.unwrap()
}
pub fn docker_ecr_aws_engine(context: &Context) -> Engine {

View File

@@ -1,9 +1,9 @@
use qovery_engine::build_platform::Image;
use qovery_engine::cloud_provider::digitalocean::kubernetes::node::Node;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DoksOptions;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::cloud_provider::digitalocean::network::vpc::VpcInitKind;
use qovery_engine::cloud_provider::digitalocean::DO;
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::TerraformStateCredentials;
use qovery_engine::container_registry::docr::DOCR;
use qovery_engine::dns_provider::DnsProvider;
@@ -61,7 +61,7 @@ pub fn do_kubernetes_ks<'a>(
context: &Context,
cloud_provider: &'a DO,
dns_provider: &'a dyn DnsProvider,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
region: Region,
) -> DOKS<'a> {
let secrets = FuncTestsSecrets::new();
@@ -74,17 +74,17 @@ pub fn do_kubernetes_ks<'a>(
region,
cloud_provider,
dns_provider,
nodes,
nodes_groups,
do_kubernetes_cluster_options(secrets, DO_KUBE_TEST_CLUSTER_ID.to_string()),
)
.unwrap()
}
pub fn do_kubernetes_nodes() -> Vec<Node> {
scw_kubernetes_custom_nodes(10, Node::new_with_cpu_and_mem(4, 8))
}
pub fn scw_kubernetes_custom_nodes(count: usize, node: Node) -> Vec<Node> {
vec![node.clone(); count]
pub fn do_kubernetes_nodes() -> Vec<NodeGroups> {
vec![
NodeGroups::new("groupdoks0".to_string(), 5, 10, "s-4vcpu-8gb".to_string())
.expect("Problem while setup DOKS nodes"),
]
}
pub fn cloud_provider_digitalocean(context: &Context) -> DO {

View File

@@ -1,6 +1,5 @@
use qovery_engine::build_platform::Image;
use qovery_engine::cloud_provider::scaleway::application::Zone;
use qovery_engine::cloud_provider::scaleway::kubernetes::node::{Node, NodeType};
use qovery_engine::cloud_provider::scaleway::kubernetes::{Kapsule, KapsuleOptions};
use qovery_engine::cloud_provider::scaleway::Scaleway;
use qovery_engine::cloud_provider::TerraformStateCredentials;
@@ -15,6 +14,7 @@ use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use crate::cloudflare::dns_provider_cloudflare;
use crate::utilities::{build_platform_local_docker, generate_id, FuncTestsSecrets};
use qovery_engine::cloud_provider::models::NodeGroups;
use qovery_engine::cloud_provider::qovery::EngineLocation;
use tracing::error;
@@ -147,13 +147,9 @@ pub fn scw_object_storage(context: Context, region: Zone) -> ScalewayOS {
)
}
pub fn scw_kubernetes_nodes() -> Vec<Node> {
pub fn scw_kubernetes_nodes() -> Vec<NodeGroups> {
// Note: Dev1M is a bit too small to handle engine + local docker, hence using Dev1L
scw_kubernetes_custom_nodes(10, NodeType::Dev1L)
}
pub fn scw_kubernetes_custom_nodes(count: usize, node_type: NodeType) -> Vec<Node> {
vec![Node::new(node_type); count]
vec![NodeGroups::new("groupscw0".to_string(), 5, 10, "dev1-l".to_string()).expect("Problem while setup SCW nodes")]
}
pub fn docker_scw_cr_engine(context: &Context) -> Engine {
@@ -181,7 +177,7 @@ pub fn scw_kubernetes_kapsule<'a>(
context: &Context,
cloud_provider: &'a Scaleway,
dns_provider: &'a dyn DnsProvider,
nodes: Vec<Node>,
nodes_groups: Vec<NodeGroups>,
zone: Zone,
) -> Kapsule<'a> {
let secrets = FuncTestsSecrets::new();
@@ -194,9 +190,10 @@ pub fn scw_kubernetes_kapsule<'a>(
zone,
cloud_provider,
dns_provider,
nodes,
nodes_groups,
scw_kubernetes_cluster_options(secrets),
)
.unwrap()
}
pub fn deploy_environment(context: &Context, environment_action: EnvironmentAction, zone: Zone) -> TransactionResult {

View File

@@ -45,7 +45,8 @@ fn create_upgrade_and_destroy_eks_cluster(
&cloudflare,
eks_options(secrets.clone()),
nodes.clone(),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
@@ -69,7 +70,8 @@ fn create_upgrade_and_destroy_eks_cluster(
&cloudflare,
eks_options(secrets),
nodes,
);
)
.unwrap();
if let Err(err) = tx.create_kubernetes(&kubernetes) {
panic!("{:?}", err)
}
@@ -130,7 +132,8 @@ fn create_and_destroy_eks_cluster(
&cloudflare,
eks_options,
nodes,
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {

View File

@@ -47,7 +47,8 @@ fn create_upgrade_and_destroy_doks_cluster(
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(secrets, cluster_id),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
@@ -115,7 +116,8 @@ fn create_and_destroy_doks_cluster(region: Region, secrets: FuncTestsSecrets, te
&cloudflare,
nodes,
test_utilities::digitalocean::do_kubernetes_cluster_options(secrets, cluster_id),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {

View File

@@ -48,7 +48,8 @@ fn create_digitalocean_kubernetes_doks_test_cluster() {
secrets,
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
@@ -104,7 +105,8 @@ fn destroy_digitalocean_kubernetes_doks_test_cluster() {
secrets,
test_utilities::digitalocean::DO_KUBE_TEST_CLUSTER_NAME.to_string(),
),
);
)
.unwrap();
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {

View File

@@ -47,7 +47,8 @@ fn create_upgrade_and_destroy_kapsule_cluster(
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
@@ -115,7 +116,8 @@ fn create_and_destroy_kapsule_cluster(zone: Zone, secrets: FuncTestsSecrets, tes
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {

View File

@@ -5,7 +5,6 @@ use self::test_utilities::utilities::{context, engine_run_test, init, FuncTestsS
use ::function_name::named;
use tracing::{span, Level};
use qovery_engine::cloud_provider::scaleway::kubernetes::node::NodeType;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::transaction::TransactionResult;
@@ -32,7 +31,7 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() {
let mut tx = session.transaction();
let scw_cluster = test_utilities::scaleway::cloud_provider_scaleway(&context);
let nodes = test_utilities::scaleway::scw_kubernetes_custom_nodes(10, NodeType::Gp1S);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let cloudflare = dns_provider_cloudflare(&context);
let kubernetes = Kapsule::new(
@@ -46,7 +45,8 @@ fn create_scaleway_kubernetes_kapsule_test_cluster() {
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
);
)
.unwrap();
// Deploy
if let Err(err) = tx.create_kubernetes(&kubernetes) {
@@ -99,7 +99,8 @@ fn destroy_scaleway_kubernetes_kapsule_test_cluster() {
&cloudflare,
nodes,
test_utilities::scaleway::scw_kubernetes_cluster_options(secrets),
);
)
.unwrap();
// Destroy
if let Err(err) = tx.delete_kubernetes(&kubernetes) {