fix: helm charts and missing elements for Qovery DNS

This commit is contained in:
Pierre Mavro
2022-05-08 19:00:41 +02:00
parent a1910c4361
commit 600b64f3f9
26 changed files with 446 additions and 139 deletions

View File

@@ -0,0 +1,22 @@
# external-dns helm chart values (Tera template).
# NOTE(review): indentation was reconstructed from a whitespace-mangled paste —
# confirm against the chart's values schema before shipping.
# The DNS provider block is selected at render time via `external_dns_provider`.
provider: {{ external_dns_provider }}
{% if external_dns_provider == "cloudflare" %}
cloudflare:
  apiToken: "{{ cloudflare_api_token }}"
  email: "{{ cloudflare_email }}"
  proxied: {{ cloudflare_proxied|default(value="false") }}
{% elif external_dns_provider == "pdns" %}
# Qovery DNS
pdns:
  apiUrl: "{{ qoverydns_api_url }}"
  apiPort: "{{ qoverydns_api_port }}"
  apiKey: "{{ qoverydns_api_key }}"
{% endif %}
# Restrict record management to the cluster's managed root domains.
domainFilters: {{ managed_dns_domains_root_helm_format }}
triggerLoopOnEvent: true
policy: sync
# TXT ownership records, prefixed per cluster, so several clusters can
# safely share the same DNS zone.
txtOwnerId: "{{ kubernetes_cluster_id }}"
txtPrefix: "qvy-{{ kubernetes_cluster_id }}-"
replicas: 1

View File

@@ -0,0 +1,8 @@
# metrics-server helm chart values.
# NOTE(review): indentation was reconstructed from a whitespace-mangled paste.
extraArgs:
  # Prefer the node's internal IP when scraping kubelets.
  kubelet-preferred-address-types: InternalIP,ExternalIP,Hostname
  kubelet-use-node-status-port: true
  metric-resolution: 15s
  # Writable location for the self-generated serving certificate.
  cert-dir: /tmp
apiService:
  # Register the metrics APIService with the aggregation layer.
  create: true

View File

@@ -0,0 +1,30 @@
# nginx-ingress controller helm chart values (Tera template).
# NOTE(review): nesting was reconstructed from a whitespace-mangled paste —
# all keys below are assumed to live under `controller`; verify against the
# chart's values schema.
controller:
  useComponentLabel: true
  config:
    proxy-body-size: 100m
    server-tokens: "false"
  ingressClass: nginx-qovery
  extraArgs:
    default-ssl-certificate: "cert-manager/letsencrypt-acme-qovery-cert"
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 11
    targetCPUUtilizationPercentage: 50
    targetMemoryUtilizationPercentage: 50
  # Publish the load-balancer address on Ingress resources so external-dns
  # can create records pointing at it.
  publishService:
    enabled: true
  service:
    enabled: true
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: nlb
      external-dns.alpha.kubernetes.io/hostname: "{{ wildcard_managed_dns }}"
    externalTrafficPolicy: "Local"
    sessionAffinity: ""
    healthCheckNodePort: 0

View File

@@ -11,10 +11,16 @@ data:
.:53 {
errors
health
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
hosts /etc/coredns/NodeHosts {
ttl 60
reload 15s
fallthrough
}
prometheus :9153
forward . /etc/resolv.conf
cache 30

View File

@@ -27,12 +27,21 @@ resource "aws_security_group" "ec2_instance" {
// kubernetes
ingress {
description = "Kubernetes connectivity"
description = "Kubernetes access"
from_port = random_integer.kubernetes_external_port.result
protocol = "tcp"
to_port = random_integer.kubernetes_external_port.result
cidr_blocks = ["0.0.0.0/0"]
}
// SSH
ingress {
description = "SSH access"
from_port = 22
protocol = "tcp"
to_port = 22
cidr_blocks = ["0.0.0.0/0"]
}
tags = local.tags_ec2
}

View File

@@ -16,13 +16,18 @@ data "aws_ami" "debian" {
owners = [var.ec2_image_info.owners]
}
resource "aws_key_pair" "qovery_ssh_key" {
key_name = "qovery-key"
public_key = "{{ qovery_ssh_key }}"
}
resource "aws_instance" "ec2_instance" {
ami = data.aws_ami.debian.id
instance_type = var.ec2_instance.instance_type
# disk
root_block_device {
volume_size = "30" # GiB
volume_size = var.ec2_instance.disk_size_in_gb # GiB
volume_type = "gp2"
encrypted = true
}
@@ -34,13 +39,16 @@ resource "aws_instance" "ec2_instance" {
vpc_security_group_ids = [aws_security_group.ec2_instance.id]
subnet_id = aws_subnet.ec2_zone_a[0].id
user_data = local.bootstrap
user_data_replace_on_change = true
# ssh
key_name = aws_key_pair.qovery_ssh_key.key_name
# lifecycle {
# // user data changes, forces to restart the EC2 instance
# ignore_changes = [user_data]
# }
# k3s install
user_data = local.bootstrap
user_data_replace_on_change = false
lifecycle {
// avoid user data changes, forces to restart the EC2 instance
ignore_changes = [user_data]
}
tags = merge(
local.tags_common,

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Helper script to interact with a deployed Qovery EC2/K3s cluster.
# Run with no (or an unknown) command to print the available commands.
set -e
#set -x

total_args=$#

# The help regexes need GNU awk/sed; on macOS the GNU versions are
# installed under the g-prefixed names.
awk=awk
sed=sed
if [ "$(uname)" == "Darwin" ] ; then
  awk='gawk'
  sed='gsed'
fi

function help() {
  echo "Usage: $0 <command> <args>"
  # Self-documenting help: extract each function line tagged with a
  # double-hash description. "$0" is quoted (fix) so a script path
  # containing spaces does not word-split.
  grep '##' "$0" | grep -v grep | $sed -r "s/^function\s(\w+).+##\s*(.+)$/\1| \2/g" | $awk 'BEGIN {FS = "|"}; {printf "\033[36m%-30s\033[0m %s\n", $1, $2}'
}

function check_args() {
  # Ensure the expected number of positional args were given
  # ($1 = args expected after the command name).
  num_args=$(($1+1))
  if [[ $total_args -ne $num_args ]]; then
    echo "Illegal number of parameters, expected $num_args"
    exit 2
  fi
}

function get_connection_details() { ## print environment variables to connect to cluster
  echo 'export AWS_ACCESS_KEY_ID="{{ aws_access_key }}"'
  echo 'export AWS_SECRET_ACCESS_KEY="{{ aws_secret_key }}"'
  echo 'export AWS_DEFAULT_REGION="{{ aws_region }}"'
  echo 'export KUBECONFIG=../../object-storage/s3/default-s3/{{ s3_kubeconfig_bucket }}/{{ kubernetes_cluster_id }}.yaml'
}

case $1 in
  get_connection_details)
    get_connection_details
    ;;
  *)
    help
    exit 1
    ;;
esac

View File

@@ -70,7 +70,8 @@ variable "ec2_image_info" {
variable "ec2_instance" {
description = "EC2 instance configuration"
default = {
"instance_type" = "t3.micro"
"instance_type" = "{{ eks_worker_nodes[0].instance_type }}"
"disk_size_in_gb" = "{{ eks_worker_nodes[0].disk_size_in_gib }}"
}
type = map(string)
}
@@ -80,7 +81,7 @@ variable "k3s_config" {
default = {
"version" = "v1.20.15+k3s1"
"channel" = "stable"
"exec" = "--disable=traefik"
"exec" = "--disable=traefik --disable=metrics-server"
}
type = map(string)
}

View File

@@ -1,20 +1,23 @@
use crate::cloud_provider;
use crate::cloud_provider::aws::kubernetes;
use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType;
use crate::cloud_provider::aws::kubernetes::Options;
use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones};
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::kubernetes::{send_progress_on_long_task, Kind, Kubernetes, KubernetesUpgradeStatus};
use crate::cloud_provider::models::{InstanceEc2, NodeGroups};
use crate::cloud_provider::utilities::print_action;
use crate::cloud_provider::CloudProvider;
use crate::dns_provider::DnsProvider;
use crate::errors::EngineError;
use crate::events::{EnvironmentStep, InfrastructureStep, Stage};
use crate::events::{EngineEvent, EnvironmentStep, InfrastructureStep, Stage};
use crate::io_models::{Action, Context, Listen, Listener, Listeners};
use crate::logger::Logger;
use crate::object_storage::s3::S3;
use crate::object_storage::ObjectStorage;
use function_name::named;
use std::borrow::Borrow;
use std::str::FromStr;
use std::sync::Arc;
/// EC2 kubernetes provider allowing to deploy a cluster on single EC2 node.
@@ -31,6 +34,7 @@ pub struct EC2 {
s3: S3,
template_directory: String,
options: Options,
instance: InstanceEc2,
listeners: Listeners,
logger: Box<dyn Logger>,
}
@@ -47,6 +51,7 @@ impl EC2 {
cloud_provider: Arc<Box<dyn CloudProvider>>,
dns_provider: Arc<Box<dyn DnsProvider>>,
options: Options,
instance: InstanceEc2,
logger: Box<dyn Logger>,
) -> Result<Self, EngineError> {
let event_details = kubernetes::event_details(&**cloud_provider, id, name, &region, &context);
@@ -54,6 +59,12 @@ impl EC2 {
let aws_zones = kubernetes::aws_zones(zones, &region, &event_details)?;
let s3 = kubernetes::s3(&context, &region, &**cloud_provider);
if let Err(e) = AwsInstancesType::from_str(instance.instance_type.as_str()) {
let err = EngineError::new_unsupported_instance_type(event_details, instance.instance_type.as_str(), e);
logger.log(EngineEvent::Error(err.clone(), None));
return Err(err);
}
// copy listeners from CloudProvider
let listeners = cloud_provider.listeners().clone();
@@ -69,6 +80,7 @@ impl EC2 {
dns_provider,
s3,
options,
instance,
template_directory,
logger,
listeners,
@@ -82,6 +94,17 @@ impl EC2 {
/// Short component label for this resource.
// NOTE(review): appears to feed log/progress messages (`print_action` is
// imported in this file) — confirm at call sites.
fn struct_name(&self) -> &str {
"kubernetes"
}
/// Builds the single-node group backing this EC2 cluster: one fixed node
/// ("instance", min = 1, max = 1) of the configured instance type and disk size.
fn node_group_from_instance_type(&self) -> NodeGroups {
    let instance_type = self.instance.instance_type.clone();
    let disk_size_in_gib = self.instance.disk_size_in_gib;
    // The instance type was already validated at construction time,
    // hence the expect.
    NodeGroups::new("instance".to_string(), 1, 1, instance_type, disk_size_in_gib)
        .expect("wrong instance type for EC2") // using expect here as it has already been validated during instantiation
}
}
impl Kubernetes for EC2 {
@@ -154,7 +177,7 @@ impl Kubernetes for EC2 {
self.long_id,
self.template_directory.as_str(),
&self.zones,
&[],
&[self.node_group_from_instance_type()],
&self.options,
)
})

View File

@@ -1,7 +1,8 @@
use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use crate::cloud_provider::helm::{
get_chart_for_cert_manager, get_chart_for_cluster_agent, get_chart_for_shell_agent, ChartInfo, ChartSetValue,
ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, ShellAgentContext,
get_chart_for_cert_manager_config, get_chart_for_cluster_agent, get_chart_for_shell_agent, ChartInfo,
ChartSetValue, ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces,
ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName};
use crate::dns_provider::DnsProviderConfiguration;
@@ -101,15 +102,6 @@ pub fn ec2_aws_helm_charts(
},
};
// Calico for AWS
let aws_calico = CommonChart {
chart_info: ChartInfo {
name: "calico".to_string(),
path: chart_path("charts/aws-calico"),
..Default::default()
},
};
let coredns_config = CoreDNSConfigChart {
chart_info: ChartInfo {
name: "coredns".to_string(),
@@ -137,21 +129,32 @@ pub fn ec2_aws_helm_charts(
values_files: vec![chart_path("chart_values/external-dns.yaml")],
values: vec![
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
value: "30Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "50Mi".to_string(),
value: "30Mi".to_string(),
},
],
..Default::default()
},
};
let metrics_server = CommonChart {
chart_info: ChartInfo {
name: "metrics-server".to_string(),
path: chart_path("common/charts/metrics-server"),
values_files: vec![chart_path("chart_values/metrics-server.yaml")],
values: vec![
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "30Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "30Mi".to_string(),
},
],
..Default::default()
@@ -189,62 +192,38 @@ pub fn ec2_aws_helm_charts(
value: "qovery".to_string(),
},
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "1Gi".to_string(),
value: "50Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "1Gi".to_string(),
value: "50Mi".to_string(),
},
// Webhooks resources limits
ChartSetValue {
key: "webhook.resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "webhook.resources.limits.memory".to_string(),
value: "128Mi".to_string(),
value: "64Mi".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.memory".to_string(),
value: "128Mi".to_string(),
value: "64Mi".to_string(),
},
// Cainjector resources limits
ChartSetValue {
key: "cainjector.resources.limits.cpu".to_string(),
value: "500m".to_string(),
},
ChartSetValue {
key: "cainjector.resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "cainjector.resources.limits.memory".to_string(),
value: "1Gi".to_string(),
value: "64Mi".to_string(),
},
ChartSetValue {
key: "cainjector.resources.requests.memory".to_string(),
value: "1Gi".to_string(),
value: "64Mi".to_string(),
},
],
..Default::default()
},
};
let cert_manager_config = get_chart_for_cert_manager(
let cert_manager_config = get_chart_for_cert_manager_config(
&chart_config_prerequisites.dns_provider_config,
chart_path("common/charts/cert-manager-configs"),
chart_config_prerequisites.dns_email_report.clone(),
@@ -262,31 +241,15 @@ pub fn ec2_aws_helm_charts(
values_files: vec![chart_path("chart_values/nginx-ingress.yaml")],
values: vec![
// Controller resources limits
ChartSetValue {
key: "controller.resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "controller.resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "controller.resources.limits.memory".to_string(),
value: "768Mi".to_string(),
value: "192Mi".to_string(),
},
ChartSetValue {
key: "controller.resources.requests.memory".to_string(),
value: "768Mi".to_string(),
value: "192Mi".to_string(),
},
// Default backend resources limits
ChartSetValue {
key: "defaultBackend.resources.limits.cpu".to_string(),
value: "20m".to_string(),
},
ChartSetValue {
key: "defaultBackend.resources.requests.cpu".to_string(),
value: "10m".to_string(),
},
ChartSetValue {
key: "defaultBackend.resources.limits.memory".to_string(),
value: "32Mi".to_string(),
@@ -309,7 +272,17 @@ pub fn ec2_aws_helm_charts(
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?;
let cluster_agent_resources = vec![
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "50Mi".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
},
];
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, Some(cluster_agent_resources))?;
let shell_context = ShellAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
@@ -320,7 +293,17 @@ pub fn ec2_aws_helm_charts(
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let shell_agent_resources = vec![
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "50Mi".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
},
];
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, Some(shell_agent_resources))?;
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,
@@ -364,21 +347,13 @@ pub fn ec2_aws_helm_charts(
value: format!("http://{}.cluster.local:3100", "not-installed"),
},
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "1".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "500Mi".to_string(),
value: "50Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "500Mi".to_string(),
value: "50Mi".to_string(),
},
],
..Default::default()
@@ -399,9 +374,9 @@ pub fn ec2_aws_helm_charts(
let level_3: Vec<Box<dyn HelmChart>> = vec![];
let level_4: Vec<Box<dyn HelmChart>> = vec![Box::new(aws_calico)];
let level_4: Vec<Box<dyn HelmChart>> = vec![];
let level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(external_dns)];
let level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(external_dns), Box::new(metrics_server)];
let level_6: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress)];

View File

@@ -64,16 +64,11 @@ impl EKS {
let aws_zones = kubernetes::aws_zones(zones, &region, &event_details)?;
for node_group in &nodes_groups {
if let Err(e) = AwsInstancesType::from_str(node_group.instance_type.as_str()) {
let err =
EngineError::new_unsupported_instance_type(event_details, node_group.instance_type.as_str(), e);
logger.log(EngineEvent::Error(err.clone(), None));
return Err(err);
}
}
// ensure config is ok
if let Err(e) = EKS::validate_node_groups(nodes_groups.clone(), &event_details) {
logger.log(EngineEvent::Error(e.clone(), None));
return Err(e);
};
let s3 = kubernetes::s3(&context, &region, &**cloud_provider);
@@ -98,6 +93,49 @@ impl EKS {
})
}
/// Validates every node group's instance type.
///
/// Returns an `UnsupportedInstanceType` error when the type cannot be parsed
/// into an [`AwsInstancesType`], and a `NotAllowedInstanceType` error when the
/// type is known but disallowed for EKS node groups.
pub fn validate_node_groups(
nodes_groups: Vec<NodeGroups>,
event_details: &EventDetails,
) -> Result<(), EngineError> {
for node_group in nodes_groups.iter() {
let instance_type = node_group.instance_type.as_str();
// Unknown type string -> unsupported.
let parsed = AwsInstancesType::from_str(instance_type).map_err(|e| {
EngineError::new_unsupported_instance_type(event_details.clone(), instance_type, e)
})?;
// Known but forbidden type -> not allowed.
if !EKS::is_instance_allowed(parsed) {
return Err(EngineError::new_not_allowed_instance_type(
event_details.clone(),
instance_type,
));
}
}
Ok(())
}
/// Returns whether the given AWS instance type may back an EKS node group.
/// The smallest burstable types are rejected (presumably undersized for the
/// cluster workloads — confirm with product requirements). The match is kept
/// exhaustive on purpose so adding a new variant forces a decision here.
pub fn is_instance_allowed(instance_type: AwsInstancesType) -> bool {
match instance_type {
// Rejected types.
AwsInstancesType::T3Small
| AwsInstancesType::T3Medium
| AwsInstancesType::T3aSmall
| AwsInstancesType::T3aMedium => false,
// Everything else currently defined is allowed.
AwsInstancesType::T2Large
| AwsInstancesType::T2Xlarge
| AwsInstancesType::T3Large
| AwsInstancesType::T3Xlarge
| AwsInstancesType::T3aLarge
| AwsInstancesType::T3a2xlarge => true,
}
}
fn set_cluster_autoscaler_replicas(
&self,
event_details: EventDetails,
@@ -669,3 +707,48 @@ impl Listen for EKS {
self.listeners.push(listener);
}
}
#[cfg(test)]
mod tests {
use crate::cloud_provider::aws::kubernetes::eks::EKS;
use crate::cloud_provider::models::NodeGroups;
use crate::errors::Tag;
use crate::events::{EventDetails, InfrastructureStep, Stage, Transmitter};
use crate::io_models::QoveryIdentifier;
// Covers EKS::validate_node_groups: an allowed type passes, a known-but-disallowed
// type yields NotAllowedInstanceType, and an unknown type yields
// UnsupportedInstanceType.
#[test]
fn test_allowed_eks_nodes() {
// Minimal event details; random identifiers because their values are
// irrelevant to the validation under test.
let event_details = EventDetails::new(
None,
QoveryIdentifier::new_random(),
QoveryIdentifier::new_random(),
QoveryIdentifier::new_random(),
None,
Stage::Infrastructure(InfrastructureStep::LoadConfiguration),
Transmitter::Kubernetes("".to_string(), "".to_string()),
);
// t3a.large is both parseable and allowed.
assert!(EKS::validate_node_groups(
vec![NodeGroups::new("".to_string(), 3, 5, "t3a.large".to_string(), 20).unwrap()],
&event_details,
)
.is_ok());
// t3.small parses but is disallowed for EKS node groups.
assert_eq!(
EKS::validate_node_groups(
vec![NodeGroups::new("".to_string(), 3, 5, "t3.small".to_string(), 20).unwrap()],
&event_details
)
.unwrap_err()
.tag(),
&Tag::NotAllowedInstanceType
);
// An unknown instance type string fails parsing entirely.
assert_eq!(
EKS::validate_node_groups(
vec![NodeGroups::new("".to_string(), 3, 5, "t1000.terminator".to_string(), 20).unwrap()],
&event_details
)
.unwrap_err()
.tag(),
&Tag::UnsupportedInstanceType
);
}
}

View File

@@ -1,6 +1,6 @@
use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use crate::cloud_provider::helm::{
get_chart_for_cert_manager, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_chart_for_cert_manager_config, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue, ChartValuesGenerated,
ClusterAgentContext, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces,
PrometheusOperatorConfigChart, ShellAgentContext,
@@ -791,7 +791,7 @@ datasources:
},
};
let cert_manager_config = get_chart_for_cert_manager(
let cert_manager_config = get_chart_for_cert_manager_config(
&chart_config_prerequisites.dns_provider_config,
chart_path("common/charts/cert-manager-configs"),
chart_config_prerequisites.dns_email_report.clone(),
@@ -964,7 +964,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?;
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, None)?;
let shell_context = ShellAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
@@ -975,7 +975,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, None)?;
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,

View File

@@ -622,6 +622,9 @@ fn create(
// wait for AWS EC2 K3S port is open to avoid later deployment issues (and kubeconfig not available on S3)
if let Kind::Ec2 = kubernetes.kind() {
kubernetes.delete_local_kubeconfig();
kubernetes.get_kubeconfig_file()?;
let qovery_teraform_config =
get_aws_ec2_qovery_terraform_config(format!("{}/qovery-tf-config.json", &temp_dir).as_str())
.map_err(|e| EngineError::new_terraform_qovery_config_mismatch(event_details.clone(), e))?;

View File

@@ -8,8 +8,11 @@ use std::str::FromStr;
pub enum AwsInstancesType {
T2Large, // 2 cores 8Gb RAM
T2Xlarge, // 4 cores 16Gb RAM
T3Small, // 2 cores 2Gb RAM
T3Medium, // 2 cores 4Gb RAM
T3Large, // 2 cores 8Gb RAM
T3Xlarge, // 4 cores 16Gb RAM
T3aSmall, // 2 cores 2Gb RAM
T3aMedium, // 2 cores 4Gb RAM
T3aLarge, // 2 cores 8Gb RAM
T3a2xlarge, // 8 cores 32Gb RAM
@@ -25,6 +28,9 @@ impl InstanceType for AwsInstancesType {
AwsInstancesType::T3aMedium => "t3a.medium",
AwsInstancesType::T3aLarge => "t3a.large",
AwsInstancesType::T3a2xlarge => "t3a.2xlarge",
AwsInstancesType::T3Small => "t3.small",
AwsInstancesType::T3Medium => "t3.medium",
AwsInstancesType::T3aSmall => "t3a.small",
}
.to_string()
}
@@ -40,6 +46,9 @@ impl AwsInstancesType {
AwsInstancesType::T3aMedium => "t3a.medium",
AwsInstancesType::T3aLarge => "t3a.large",
AwsInstancesType::T3a2xlarge => "t3a.2xlarge",
AwsInstancesType::T3Small => "t3.small",
AwsInstancesType::T3Medium => "t3.medium",
AwsInstancesType::T3aSmall => "t3a.small",
}
}
}
@@ -54,6 +63,9 @@ impl fmt::Display for AwsInstancesType {
AwsInstancesType::T3aMedium => write!(f, "t3a.medium"),
AwsInstancesType::T3aLarge => write!(f, "t3a.large"),
AwsInstancesType::T3a2xlarge => write!(f, "t3a.2xlarge"),
AwsInstancesType::T3Small => write!(f, "t3.small"),
AwsInstancesType::T3Medium => write!(f, "t3.medium"),
AwsInstancesType::T3aSmall => write!(f, "t3a.small"),
}
}
}
@@ -70,6 +82,9 @@ impl FromStr for AwsInstancesType {
"t3a.medium" => Ok(AwsInstancesType::T3aMedium),
"t3a.large" => Ok(AwsInstancesType::T3aLarge),
"t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge),
"t3.small" => Ok(AwsInstancesType::T3Small),
"t3.medium" => Ok(AwsInstancesType::T3Medium),
"t3a.small" => Ok(AwsInstancesType::T3aSmall),
_ => Err(CommandError::new_from_safe_message(format!(
"`{}` instance type is not supported",
s

View File

@@ -1,6 +1,6 @@
use crate::cloud_provider::digitalocean::kubernetes::DoksOptions;
use crate::cloud_provider::helm::{
get_chart_for_cert_manager, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_chart_for_cert_manager_config, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated, ClusterAgentContext,
CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext,
};
@@ -604,7 +604,7 @@ datasources:
},
};
let cert_manager_config = get_chart_for_cert_manager(
let cert_manager_config = get_chart_for_cert_manager_config(
&chart_config_prerequisites.dns_provider_config,
chart_path("common/charts/cert-manager-configs"),
chart_config_prerequisites.dns_email_report.clone(),
@@ -772,7 +772,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?;
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, None)?;
let shell_context = ShellAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
@@ -783,7 +783,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, None)?;
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,

View File

@@ -727,6 +727,7 @@ pub struct ShellAgentContext<'a> {
pub fn get_chart_for_shell_agent(
context: ShellAgentContext,
chart_path: impl Fn(&str) -> String,
custom_resources: Option<Vec<ChartSetValue>>,
) -> Result<CommonChart, CommandError> {
let shell_agent_version: QoveryShellAgent = get_qovery_app_version(
QoveryAppName::ShellAgent,
@@ -734,7 +735,7 @@ pub fn get_chart_for_shell_agent(
context.api_url,
context.cluster_id,
)?;
let shell_agent = CommonChart {
let mut shell_agent = CommonChart {
chart_info: ChartInfo {
name: "shell-agent".to_string(),
path: chart_path("common/charts/qovery/qovery-shell-agent"),
@@ -772,7 +773,15 @@ pub fn get_chart_for_shell_agent(
key: "environmentVariables.ORGANIZATION_ID".to_string(),
value: context.organization_long_id.to_string(),
},
// resources limits
],
..Default::default()
},
};
// resources limits
match custom_resources {
None => {
let mut default_resources = vec![
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "1".to_string(),
@@ -789,10 +798,14 @@ pub fn get_chart_for_shell_agent(
key: "resources.requests.memory".to_string(),
value: "100Mi".to_string(),
},
],
..Default::default()
},
};
];
shell_agent.chart_info.values.append(&mut default_resources)
}
Some(custom_resources) => {
let mut custom_resources_tmp = custom_resources;
shell_agent.chart_info.values.append(&mut custom_resources_tmp)
}
}
Ok(shell_agent)
}
@@ -813,6 +826,7 @@ pub struct ClusterAgentContext<'a> {
pub fn get_chart_for_cluster_agent(
context: ClusterAgentContext,
chart_path: impl Fn(&str) -> String,
custom_resources: Option<Vec<ChartSetValue>>,
) -> Result<CommonChart, CommandError> {
let shell_agent_version: QoveryShellAgent = get_qovery_app_version(
QoveryAppName::ClusterAgent,
@@ -820,7 +834,7 @@ pub fn get_chart_for_cluster_agent(
context.api_url,
context.cluster_id,
)?;
let cluster_agent = CommonChart {
let mut cluster_agent = CommonChart {
chart_info: ChartInfo {
name: "cluster-agent".to_string(),
path: chart_path("common/charts/qovery/qovery-cluster-agent"),
@@ -858,7 +872,15 @@ pub fn get_chart_for_cluster_agent(
key: "environmentVariables.ORGANIZATION_ID".to_string(),
value: context.organization_long_id.to_string(),
},
// resources limits
],
..Default::default()
},
};
// resources limits
match custom_resources {
None => {
let mut default_resources = vec![
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "200m".to_string(),
@@ -875,16 +897,20 @@ pub fn get_chart_for_cluster_agent(
key: "resources.limits.memory".to_string(),
value: "500Mi".to_string(),
},
],
..Default::default()
},
};
];
cluster_agent.chart_info.values.append(&mut default_resources)
}
Some(custom_resources) => {
let mut custom_resources_tmp = custom_resources;
cluster_agent.chart_info.values.append(&mut custom_resources_tmp)
}
}
Ok(cluster_agent)
}
// Cert manager
pub fn get_chart_for_cert_manager(
pub fn get_chart_for_cert_manager_config(
dns_provider_config: &DnsProviderConfiguration,
chart_path: String,
lets_encrypt_email_report: String,

View File

@@ -13,10 +13,11 @@ use retry::delay::{Fibonacci, Fixed};
use retry::Error::Operation;
use retry::OperationResult;
use serde::{Deserialize, Serialize};
use tokio::fs;
use crate::cloud_provider::aws::regions::AwsZones;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::models::{CpuLimits, NodeGroups};
use crate::cloud_provider::models::{CpuLimits, InstanceEc2, NodeGroups};
use crate::cloud_provider::service::CheckAction;
use crate::cloud_provider::{service, CloudProvider, DeploymentTarget};
use crate::cmd::kubectl;
@@ -181,6 +182,15 @@ pub trait Kubernetes: Listen {
Ok(path)
}
fn delete_local_kubeconfig(&self) {
// just ignoring if not already present
let file = match self.get_kubeconfig_file_path() {
Ok(x) => x,
Err(_) => return,
};
let _ = fs::remove_file(file);
}
fn resources(&self, _environment: &Environment) -> Result<Resources, EngineError> {
let kubernetes_config_file_path = self.get_kubeconfig_file_path()?;
let stage = Stage::General(GeneralStep::RetrieveClusterResources);
@@ -1258,6 +1268,15 @@ impl NodeGroups {
}
}
impl InstanceEc2 {
/// Describes the single EC2 machine backing a cluster: its AWS instance
/// type string and root disk size in GiB.
pub fn new(instance_type: String, disk_size_in_gib: i32) -> InstanceEc2 {
InstanceEc2 {
disk_size_in_gib,
instance_type,
}
}
}
/// TODO(benjaminch): to be refactored with similar function in services.rs
/// This function call (start|pause|delete)_in_progress function every 10 seconds when a
/// long blocking task is running.

View File

@@ -80,3 +80,8 @@ pub struct NodeGroupsFormat {
pub instance_type: String,
pub disk_size_in_gib: String,
}
/// Configuration of the single EC2 machine backing an EC2/K3s cluster.
// Derives added: public data types should at least be Debug/Clone so they can
// be logged and passed around; Eq enables straightforward test assertions.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InstanceEc2 {
// AWS instance type string, e.g. "t3.small".
pub instance_type: String,
// Root disk size in GiB.
pub disk_size_in_gib: i32,
}

View File

@@ -1,5 +1,5 @@
use crate::cloud_provider::helm::{
get_chart_for_cert_manager, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_chart_for_cert_manager_config, get_chart_for_cluster_agent, get_chart_for_shell_agent,
get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated, ClusterAgentContext,
CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext,
};
@@ -553,7 +553,7 @@ datasources:
},
};
let cert_manager_config = get_chart_for_cert_manager(
let cert_manager_config = get_chart_for_cert_manager_config(
&chart_config_prerequisites.dns_provider_config,
chart_path("common/charts/cert-manager-configs"),
chart_config_prerequisites.dns_email_report.clone(),
@@ -645,7 +645,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?;
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path, None)?;
let shell_context = ShellAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
@@ -656,7 +656,7 @@ datasources:
cluster_jwt_token: &chart_config_prerequisites.infra_options.jwt_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path, None)?;
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,

View File

@@ -61,7 +61,7 @@ impl DnsProvider for Cloudflare {
}
fn insert_into_teracontext<'a>(&self, context: &'a mut TeraContext) -> &'a mut TeraContext {
context.insert("external_dns_provider", &self.name);
context.insert("external_dns_provider", &self.provider_name());
context.insert("cloudflare_email", &self.cloudflare_email);
context.insert("cloudflare_api_token", &self.cloudflare_api_token);
context

View File

@@ -65,7 +65,7 @@ impl DnsProvider for QoveryDns {
}
fn insert_into_teracontext<'a>(&self, context: &'a mut TeraContext) -> &'a mut TeraContext {
context.insert("external_dns_provider", &self.name);
context.insert("external_dns_provider", &self.provider_name());
context.insert("qoverydns_api_url", &self.api_url);
context.insert("qoverydns_api_port", &self.api_port);
context.insert("qoverydns_api_key", &self.api_key);

View File

@@ -74,6 +74,7 @@ pub enum Tag {
CannotGetAnyAvailableVPC,
UnsupportedVersion,
UnsupportedClusterKind,
NotAllowedInstanceType,
CannotGetSupportedVersions,
CannotGetCluster,
ContainerRegistryError,
@@ -227,6 +228,7 @@ impl From<errors::Tag> for Tag {
errors::Tag::BuilderError => Tag::BuilderError,
errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError,
errors::Tag::UnsupportedClusterKind => Tag::UnsupportedClusterKind,
errors::Tag::NotAllowedInstanceType => Tag::NotAllowedInstanceType,
errors::Tag::TerraformQoveryConfigMismatch => Tag::TerraformQoveryConfigMismatch,
}
}

View File

@@ -175,6 +175,8 @@ pub enum Tag {
CannotGetWorkspaceDirectory,
/// UnsupportedInstanceType: represents an unsupported instance type for the given cloud provider.
UnsupportedInstanceType,
/// NotAllowedInstanceType: represents not allowed instance type for a specific kind of cluster
NotAllowedInstanceType,
/// UnsupportedClusterKind: represents an unsupported cluster kind by Qovery.
UnsupportedClusterKind,
/// UnsupportedRegion: represents an unsupported region for the given cloud provider.
@@ -601,6 +603,30 @@ impl EngineError {
)
}
/// Creates new error for not allowed instance type.
///
/// Qovery doesn't allow the requested instance type.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `requested_instance_type`: Raw requested instance type string.
pub fn new_not_allowed_instance_type(event_details: EventDetails, requested_instance_type: &str) -> EngineError {
let message = format!(
"`{}` instance type is not allowed for this kind of cluster",
requested_instance_type
);
EngineError::new(
event_details,
Tag::NotAllowedInstanceType,
// `clone()` instead of `to_string()`: the value is already a String,
// so cloning skips the pointless Display round-trip.
message.clone(),
message,
None,
None, // TODO(documentation): Create a page entry to details this error
Some("Selected instance type is not allowed, please check Qovery's documentation.".to_string()),
)
}
/// Creates new error for unsupported instance type.
///
/// Cloud provider doesn't support the requested instance type.

View File

@@ -1 +1,7 @@
use qovery_engine::cloud_provider::models::InstanceEc2;
pub const AWS_K3S_VERSION: &str = "v1.20.15+k3s1";
/// Instance description used by the EC2 infrastructure tests:
/// a t3.small node with a 20 GiB root disk.
pub fn ec2_kubernetes_instance() -> InstanceEc2 {
InstanceEc2::new(String::from("t3.small"), 20)
}

View File

@@ -12,6 +12,7 @@ use qovery_engine::io_models::{
};
use crate::aws::{AWS_KUBERNETES_VERSION, AWS_TEST_REGION};
use crate::aws_ec2::ec2_kubernetes_instance;
use crate::digitalocean::{DO_KUBERNETES_VERSION, DO_TEST_REGION};
use crate::scaleway::{SCW_KUBERNETES_VERSION, SCW_TEST_ZONE};
use crate::utilities::{
@@ -1363,6 +1364,7 @@ pub fn get_environment_test_kubernetes(
cloud_provider,
dns_provider,
options,
ec2_kubernetes_instance(),
logger,
)
.unwrap(),
@@ -1472,6 +1474,7 @@ pub fn get_cluster_test_kubernetes<'a>(
cloud_provider,
dns_provider,
options,
ec2_kubernetes_instance(),
logger,
)
.unwrap(),

View File

@@ -43,11 +43,6 @@ fn create_and_destroy_aws_ec2_k3s_cluster(
})
}
/*
TESTS NOTES:
It is useful to keep 2 clusters deployment tests to run in // to validate there is no name collision (overlaping)
*/
#[cfg(feature = "test-aws-infra-ec2")]
#[named]
#[test]