Merge branch 'dev' into refactor/extract-state-out-of-engine-event

Benjamin
2022-02-18 19:30:54 +01:00
committed by GitHub
44 changed files with 2217 additions and 1413 deletions

.gitignore (vendored): 1 line changed
View File

@@ -1,5 +1,6 @@
**/target
*.iml
*.orig
.idea
.qovery-workspace
.terraform/

View File

@@ -106,3 +106,6 @@ test-all-whole-enchilada = ["test-aws-whole-enchilada", "test-do-whole-enchilada
test-aws-all = ["test-aws-infra", "test-aws-managed-services", "test-aws-self-hosted", "test-aws-whole-enchilada"]
test-do-all = ["test-do-infra", "test-do-managed-services", "test-do-self-hosted", "test-do-whole-enchilada"]
test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-hosted", "test-scw-whole-enchilada"]
# functional test with only a k8s cluster as a dependency
test-with-kube = []
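
The new test-with-kube feature is an empty Cargo feature that only acts as a test gate. As a rough illustration, a test could opt into it like this (the test name and body are hypothetical; only the feature name comes from the change above):

#[cfg(feature = "test-with-kube")]
#[test]
fn deploys_against_existing_cluster() {
    // Hypothetical test body: it only needs a reachable Kubernetes cluster,
    // no cloud provider account, which is what the feature gate expresses.
    assert!(true);
}

Such tests would then be selected with cargo test --features test-with-kube.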

View File

@@ -505,7 +505,7 @@ controller:
admissionWebhooks:
annotations: {}
enabled: true
enabled: false
failurePolicy: Fail
# timeoutSeconds: 10
port: 8443

View File

@@ -1342,7 +1342,7 @@ prometheusOperator:
## rules from making their way into prometheus and potentially preventing the container from starting
admissionWebhooks:
failurePolicy: Fail
enabled: true
enabled: false
## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
## If unspecified, system trust roots on the apiserver are used.
caBundle: ""
@@ -1377,7 +1377,7 @@ prometheusOperator:
# Use certmanager to generate webhook certs
certManager:
enabled: false
enabled: true
# issuerRef:
# name: "issuer"
# kind: "ClusterIssuer"

View File

@@ -1,9 +1,9 @@
apiVersion: v2
appVersion: 0.10.1
appVersion: 0.10.4
description: Automatically removes Cloud managed services and Kubernetes resources
based on tags with TTL
home: https://github.com/Qovery/pleco
icon: https://github.com/Qovery/pleco/raw/main/assets/pleco_logo.png
name: pleco
type: application
version: 0.10.1
version: 0.10.4

View File

@@ -148,8 +148,6 @@ spec:
{{ end }}
{{- end }}
env:
- name: "AWS_EXECUTION_ENV"
value: "pleco_{{ .Values.image.plecoImageTag }}_{{ .Values.environmentVariables.PLECO_IDENTIFIER }}"
{{ range $key, $value := .Values.environmentVariables -}}
- name: "{{ $key }}"
valueFrom:

View File

@@ -3,7 +3,7 @@ replicaCount: 1
image:
repository: qoveryrd/pleco
pullPolicy: IfNotPresent
plecoImageTag: "0.10.1"
plecoImageTag: "0.10.4"
cloudProvider: ""

View File

@@ -70,7 +70,7 @@ charts:
dest: services
no_sync: true
- name: pleco
version: 0.10.1
version: 0.10.4
repo_name: pleco
- name: do-k8s-token-rotate
version: 0.1.3

View File

@@ -1,23 +1,34 @@
{% for scw_ks_worker_node in scw_ks_worker_nodes %}
resource "scaleway_k8s_pool" "kubernetes_cluster_workers_{{ loop.index }}" {
cluster_id = scaleway_k8s_cluster.kubernetes_cluster.id
name = "${var.kubernetes_cluster_id}_{{ loop.index }}"
name = "${var.kubernetes_cluster_id}_{{ scw_ks_worker_node.instance_type }}_{{ loop.index }}"
node_type = "{{ scw_ks_worker_node.instance_type }}"
region = var.region
zone = var.zone
# use Scaleway built-in cluster autoscaler
autoscaling = {{ scw_ks_pool_autoscale }}
autohealing = true
size = "{{ scw_ks_worker_node.min_nodes }}"
min_size = "{{ scw_ks_worker_node.min_nodes }}"
max_size = "{{ scw_ks_worker_node.max_nodes }}"
autoscaling = {{ scw_ks_pool_autoscale }}
autohealing = true
size = "{{ scw_ks_worker_node.min_nodes }}"
min_size = "{{ scw_ks_worker_node.min_nodes }}"
max_size = "{{ scw_ks_worker_node.max_nodes }}"
wait_for_pool_ready = true
# Not yet available: https://github.com/scaleway/terraform-provider-scaleway/issues/998
#timeouts {
# create = "60m"
# delete = "60m"
# update = "60m"
#}
depends_on = [
scaleway_k8s_cluster.kubernetes_cluster,
]
tags = concat(local.tags_ks_list, ["QoveryNodeGroupName:{{ scw_ks_worker_node.name }}", "QoveryNodeGroupId:${var.kubernetes_cluster_id}-{{ loop.index }}"])
lifecycle {
create_before_destroy = true
}
tags = concat(local.tags_ks_list, ["QoveryNodeGroupName:{{ scw_ks_worker_node.name }}", "QoveryNodeGroupId:${var.kubernetes_cluster_id}_{{ scw_ks_worker_node.instance_type }}_{{ loop.index }}"])
}
{% endfor %}
{% endfor %}

View File

@@ -77,7 +77,7 @@ variable "kubernetes_cluster_id" {
variable "kubernetes_cluster_name" {
description = "Kubernetes cluster name"
default = "qovery-{{ kubernetes_cluster_id }}" # TODO(benjaminch): handle name creation in code
default = "{{ kubernetes_cluster_name }}"
type = string
}

View File

@@ -9,7 +9,7 @@ terraform {
required_providers {
scaleway = {
source = "scaleway/scaleway"
version = "~> 2.1.0"
version = "~> 2.2.0"
}
aws = {
source = "hashicorp/aws"
@@ -28,7 +28,7 @@ terraform {
version = "~> 2.24.1"
}
}
required_version = ">= 0.13"
required_version = ">= 0.14"
}

View File

@@ -1,7 +1,7 @@
use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use crate::cloud_provider::helm::{
get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartPayload, ChartSetValue,
ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmAction, HelmChart, HelmChartNamespaces,
ChartValuesGenerated, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces,
PrometheusOperatorConfigChart, ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
@@ -458,6 +458,7 @@ pub fn aws_helm_charts(
},
};
/* Example to delete an old install
let old_prometheus_operator = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
name: "prometheus-operator".to_string(),
@@ -465,7 +466,7 @@ pub fn aws_helm_charts(
action: HelmAction::Destroy,
..Default::default()
},
};
};*/
let kube_prometheus_stack = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
@@ -704,7 +705,9 @@ datasources:
},
ChartSetValue {
key: "prometheus.servicemonitor.enabled".to_string(),
value: chart_config_prerequisites.ff_metrics_history_enabled.to_string(),
// Due to a dependency cycle, prometheus needs a tls certificate from cert manager, and enabling this
// would require prometheus to be already installed
value: "false".to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.prometheusInstance".to_string(),
@@ -730,11 +733,11 @@ datasources:
// Webhooks resources limits
ChartSetValue {
key: "webhook.resources.limits.cpu".to_string(),
value: "20m".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.cpu".to_string(),
value: "20m".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "webhook.resources.limits.memory".to_string(),
@@ -1153,26 +1156,27 @@ datasources:
Box::new(q_storage_class),
Box::new(coredns_config),
Box::new(aws_vpc_cni_chart),
Box::new(old_prometheus_operator),
];
let mut level_2: Vec<Box<dyn HelmChart>> = vec![];
let level_2: Vec<Box<dyn HelmChart>> = vec![Box::new(cert_manager)];
let mut level_3: Vec<Box<dyn HelmChart>> = vec![
let mut level_3: Vec<Box<dyn HelmChart>> = vec![];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![
Box::new(cluster_autoscaler),
Box::new(aws_iam_eks_user_mapper),
Box::new(aws_calico),
];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![
let mut level_5: Vec<Box<dyn HelmChart>> = vec![
Box::new(metrics_server),
Box::new(aws_node_term_handler),
Box::new(external_dns),
];
let mut level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress), Box::new(cert_manager)];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress)];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![
let mut level_7: Vec<Box<dyn HelmChart>> = vec![
Box::new(cert_manager_config),
Box::new(qovery_agent),
Box::new(shell_agent),
@@ -1181,26 +1185,26 @@ datasources:
// observability
if chart_config_prerequisites.ff_metrics_history_enabled {
level_2.push(Box::new(kube_prometheus_stack));
level_4.push(Box::new(prometheus_adapter));
level_4.push(Box::new(kube_state_metrics));
level_3.push(Box::new(kube_prometheus_stack));
level_5.push(Box::new(prometheus_adapter));
level_5.push(Box::new(kube_state_metrics));
}
if chart_config_prerequisites.ff_log_history_enabled {
level_3.push(Box::new(promtail));
level_4.push(Box::new(loki));
level_4.push(Box::new(promtail));
level_5.push(Box::new(loki));
}
if chart_config_prerequisites.ff_metrics_history_enabled || chart_config_prerequisites.ff_log_history_enabled {
level_6.push(Box::new(grafana))
level_7.push(Box::new(grafana))
};
// pleco
if !chart_config_prerequisites.disable_pleco {
level_5.push(Box::new(pleco));
level_6.push(Box::new(pleco));
}
info!("charts configuration preparation finished");
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6])
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6, level_7])
}
// AWS CNI
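
For context, the function now returns seven levels instead of six. Levels are deployed strictly in order, while the charts inside a single level may be installed concurrently, which is presumably why cert-manager moved into its own earlier level, ahead of nginx-ingress and cert-manager-config. A simplified, self-contained sketch of that ordering contract (plain strings stand in for Box<dyn HelmChart>, and deploy_levels is a hypothetical stand-in for deploy_charts_levels):

use std::thread;

fn deploy_levels(levels: Vec<Vec<&'static str>>) {
    for (i, level) in levels.into_iter().enumerate() {
        // Charts within one level have no ordering dependency on each other...
        let handles: Vec<_> = level
            .into_iter()
            .map(|chart| thread::spawn(move || println!("level {}: deploying {}", i + 1, chart)))
            .collect();
        // ...but the next level only starts once every chart in this one is done.
        for handle in handles {
            handle.join().expect("deploy thread panicked");
        }
    }
}

fn main() {
    deploy_levels(vec![
        vec!["q-storage-class", "coredns-config"],
        vec!["cert-manager"],
        vec!["nginx-ingress"],
    ]);
}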

View File

@@ -14,7 +14,7 @@ use crate::cloud_provider::aws::kubernetes::node::AwsInstancesType;
use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create;
use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones};
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo};
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes,
KubernetesNodesType, KubernetesUpgradeStatus, ProviderOptions,
@@ -24,11 +24,11 @@ use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::utilities::print_action;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd;
use crate::cmd::helm::{to_engine_error, Helm};
use crate::cmd::kubectl::{
kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events,
kubectl_exec_scale_replicas, ScalingKind,
};
use crate::cmd::structs::HelmChart;
use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list};
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider;
@@ -947,7 +947,10 @@ impl<'a> EKS<'a> {
};
if tf_workers_resources.is_empty() {
return Err(EngineError::new_cluster_has_no_worker_nodes(event_details.clone()));
return Err(EngineError::new_cluster_has_no_worker_nodes(
event_details.clone(),
None,
));
}
let kubernetes_config_file_path = self.get_kubeconfig_file_path()?;
@@ -1244,15 +1247,14 @@ impl<'a> EKS<'a> {
);
// delete custom metrics api to avoid stale namespaces on deletion
let _ = cmd::helm::helm_uninstall_list(
let helm = Helm::new(
&kubernetes_config_file_path,
vec![HelmChart {
name: "metrics-server".to_string(),
namespace: "kube-system".to_string(),
version: None,
}],
self.cloud_provider().credentials_environment_variables(),
);
&self.cloud_provider.credentials_environment_variables(),
)
.map_err(|e| to_engine_error(&event_details, e))?;
let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system");
helm.uninstall(&chart, &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
// required to avoid namespace stuck on deletion
uninstall_cert_manager(
@@ -1272,50 +1274,27 @@ impl<'a> EKS<'a> {
let qovery_namespaces = get_qovery_managed_namespaces();
for qovery_namespace in qovery_namespaces.iter() {
let charts_to_delete = cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
Some(qovery_namespace),
);
match charts_to_delete {
Ok(charts) => {
for chart in charts {
match cmd::helm::helm_exec_uninstall(
&kubernetes_config_file_path,
&chart.namespace,
&chart.name,
self.cloud_provider().credentials_environment_variables(),
) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`", chart.name);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, Some(e.message())),
),
)
}
}
}
}
Err(e) => {
if !(e.message().contains("not found")) {
let charts_to_delete = helm
.list_release(Some(qovery_namespace), &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
for chart in charts_to_delete {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`: {}", &chart.name, e);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!(
"Can't delete the namespace {}",
qovery_namespace
)),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1370,18 +1349,11 @@ impl<'a> EKS<'a> {
),
);
match cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
None,
) {
match helm.list_release(None, &vec![]) {
Ok(helm_charts) => {
for chart in helm_charts {
match cmd::helm::helm_uninstall_list(
&kubernetes_config_file_path,
vec![chart.clone()],
self.cloud_provider().credentials_environment_variables(),
) {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
@@ -1390,12 +1362,12 @@ impl<'a> EKS<'a> {
),
),
Err(e) => {
let message_safe = format!("Error deleting chart `{}` deleted", chart.name);
let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, e.message),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1408,7 +1380,7 @@ impl<'a> EKS<'a> {
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe.to_string(), Some(e.message())),
EventMessage::new(message_safe.to_string(), Some(e.to_string())),
),
)
}

View File

@@ -87,6 +87,7 @@ mod tests {
NodeGroups::new("".to_string(), 2, 2, "t2.large".to_string(), 20).unwrap(),
NodeGroups {
name: "".to_string(),
id: None,
min_nodes: 2,
max_nodes: 2,
instance_type: "t2.large".to_string(),

View File

@@ -1,5 +1,6 @@
use tera::Context as TeraContext;
use crate::cloud_provider::helm::ChartInfo;
use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate};
use crate::cloud_provider::service::{
default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create,
@@ -7,8 +8,9 @@ use crate::cloud_provider::service::{
};
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::cmd::helm;
use crate::cmd::helm::{to_engine_error, Timeout};
use crate::error::{EngineError, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners};
@@ -325,25 +327,26 @@ impl Create for Router {
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
let helm = helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
.map_err(|e| to_engine_error(&event_details, e).to_legacy_engine_error())?;
let chart = ChartInfo::new_from_custom_namespace(
helm_release_name,
workspace_dir.clone(),
environment.namespace().to_string(),
600_i64,
match self.service_type() {
ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)],
_ => vec![],
},
false,
self.selector(),
);
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
}
Ok(())
helm.upgrade(&chart, &vec![])
.map_err(|e| NewEngineError::new_helm_error(event_details.clone(), e).to_legacy_engine_error())
}
fn on_create_check(&self) -> Result<(), EngineError> {

View File

@@ -1,8 +1,7 @@
use crate::cloud_provider::digitalocean::kubernetes::DoksOptions;
use crate::cloud_provider::helm::{
get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated,
CommonChart, CoreDNSConfigChart, HelmAction, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart,
ShellAgentContext,
CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::errors::CommandError;
@@ -309,6 +308,7 @@ pub fn do_helm_charts(
},
};
/*
let old_prometheus_operator = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
name: "prometheus-operator".to_string(),
@@ -316,7 +316,7 @@ pub fn do_helm_charts(
action: HelmAction::Destroy,
..Default::default()
},
};
};*/
let kube_prometheus_stack = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
@@ -544,7 +544,9 @@ datasources:
},
ChartSetValue {
key: "prometheus.servicemonitor.enabled".to_string(),
value: chart_config_prerequisites.ff_metrics_history_enabled.to_string(),
// Due to a dependency cycle, prometheus needs a tls certificate from cert manager, and enabling this
// would require prometheus to be already installed
value: "false".to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.prometheusInstance".to_string(),
@@ -570,11 +572,11 @@ datasources:
// Webhooks resources limits
ChartSetValue {
key: "webhook.resources.limits.cpu".to_string(),
value: "20m".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.cpu".to_string(),
value: "20m".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "webhook.resources.limits.memory".to_string(),
@@ -1027,19 +1029,15 @@ datasources:
};
// chart deployment order matters!!!
let level_1: Vec<Box<dyn HelmChart>> = vec![
Box::new(q_storage_class),
Box::new(coredns_config),
Box::new(old_prometheus_operator),
];
let level_1: Vec<Box<dyn HelmChart>> = vec![Box::new(q_storage_class), Box::new(coredns_config)];
let mut level_2: Vec<Box<dyn HelmChart>> = vec![Box::new(container_registry_secret)];
let mut level_2: Vec<Box<dyn HelmChart>> = vec![Box::new(container_registry_secret), Box::new(cert_manager)];
let mut level_3: Vec<Box<dyn HelmChart>> = vec![];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![Box::new(metrics_server), Box::new(external_dns)];
let mut level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress), Box::new(cert_manager)];
let mut level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress)];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![
Box::new(cert_manager_config),

View File

@@ -26,11 +26,10 @@ use crate::cloud_provider::models::NodeGroups;
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::utilities::{print_action, VersionsNumber};
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd::helm::{helm_exec_upgrade_with_chart_info, helm_upgrade_diff_with_chart_info};
use crate::cmd::helm::{to_engine_error, Helm};
use crate::cmd::kubectl::{
do_kubectl_exec_get_loadbalancer_id, kubectl_exec_get_all_namespaces, kubectl_exec_get_events,
};
use crate::cmd::structs::HelmChart;
use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list};
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider::DnsProvider;
@@ -816,18 +815,16 @@ impl<'a> DOKS<'a> {
..Default::default()
};
let _ = helm_upgrade_diff_with_chart_info(
&kubeconfig_path,
&credentials_environment_variables,
&load_balancer_dns_hostname,
);
helm_exec_upgrade_with_chart_info(
let helm = Helm::new(
&kubeconfig_path,
&self.cloud_provider.credentials_environment_variables(),
&load_balancer_dns_hostname,
)
.map_err(|e| EngineError::new_helm_charts_deploy_error(event_details.clone(), e))
.map_err(|e| EngineError::new_helm_error(event_details.clone(), e))?;
// This will ony print the diff on stdout
let _ = helm.upgrade_diff(&load_balancer_dns_hostname, &vec![]);
helm.upgrade(&load_balancer_dns_hostname, &vec![])
.map_err(|e| EngineError::new_helm_error(event_details.clone(), e))
}
fn create_error(&self) -> Result<(), EngineError> {
@@ -1096,15 +1093,14 @@ impl<'a> DOKS<'a> {
);
// delete custom metrics api to avoid stale namespaces on deletion
let _ = cmd::helm::helm_uninstall_list(
let helm = Helm::new(
&kubernetes_config_file_path,
vec![HelmChart {
name: "metrics-server".to_string(),
namespace: "kube-system".to_string(),
version: None,
}],
self.cloud_provider().credentials_environment_variables(),
);
&self.cloud_provider.credentials_environment_variables(),
)
.map_err(|e| to_engine_error(&event_details, e))?;
let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system");
helm.uninstall(&chart, &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
// required to avoid namespace stuck on deletion
uninstall_cert_manager(
@@ -1124,50 +1120,27 @@ impl<'a> DOKS<'a> {
let qovery_namespaces = get_qovery_managed_namespaces();
for qovery_namespace in qovery_namespaces.iter() {
let charts_to_delete = cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
Some(qovery_namespace),
);
match charts_to_delete {
Ok(charts) => {
for chart in charts {
match cmd::helm::helm_exec_uninstall(
&kubernetes_config_file_path,
&chart.namespace,
&chart.name,
self.cloud_provider().credentials_environment_variables(),
) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`", chart.name);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, Some(e.message())),
),
)
}
}
}
}
Err(e) => {
if !(e.message().contains("not found")) {
let charts_to_delete = helm
.list_release(Some(qovery_namespace), &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
for chart in charts_to_delete {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`", chart.name);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!(
"Can't delete the namespace {}",
qovery_namespace
)),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1222,18 +1195,11 @@ impl<'a> DOKS<'a> {
),
);
match cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
None,
) {
match helm.list_release(None, &vec![]) {
Ok(helm_charts) => {
for chart in helm_charts {
match cmd::helm::helm_uninstall_list(
&kubernetes_config_file_path,
vec![chart.clone()],
self.cloud_provider().credentials_environment_variables(),
) {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
@@ -1242,12 +1208,12 @@ impl<'a> DOKS<'a> {
),
),
Err(e) => {
let message_safe = format!("Error deleting chart `{}` deleted", chart.name);
let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, e.message),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1260,7 +1226,7 @@ impl<'a> DOKS<'a> {
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe.to_string(), Some(e.message())),
EventMessage::new(message_safe.to_string(), Some(e.to_string())),
),
)
}

View File

@@ -133,6 +133,7 @@ mod tests {
NodeGroups::new("".to_string(), 2, 2, "s-2vcpu-4gb".to_string(), 20).unwrap(),
NodeGroups {
name: "".to_string(),
id: None,
min_nodes: 2,
max_nodes: 2,
instance_type: "s-2vcpu-4gb".to_string(),

View File

@@ -1,5 +1,6 @@
use tera::Context as TeraContext;
use crate::cloud_provider::helm::ChartInfo;
use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate};
use crate::cloud_provider::service::{
default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create,
@@ -7,6 +8,7 @@ use crate::cloud_provider::service::{
};
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm;
use crate::cmd::helm::Timeout;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
@@ -345,25 +347,26 @@ impl Create for Router {
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
let helm = helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
let chart = ChartInfo::new_from_custom_namespace(
helm_release_name,
workspace_dir.clone(),
environment.namespace().to_string(),
600_i64,
match self.service_type() {
ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)],
_ => vec![],
},
false,
self.selector(),
);
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
}
Ok(())
helm.upgrade(&chart, &vec![])
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())
}
fn on_create_check(&self) -> Result<(), EngineError> {

View File

@@ -1,10 +1,7 @@
use crate::cloud_provider::helm::HelmAction::Deploy;
use crate::cloud_provider::helm::HelmChartNamespaces::KubeSystem;
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAppName, QoveryShellAgent};
use crate::cmd::helm::{
helm_destroy_chart_if_breaking_changes_version_detected, helm_exec_uninstall_with_chart_info,
helm_exec_upgrade_with_chart_info, helm_upgrade_diff_with_chart_info, is_chart_deployed,
};
use crate::cmd::helm::{to_command_error, Helm};
use crate::cmd::kubectl::{
kubectl_delete_crash_looping_pods, kubectl_exec_delete_crd, kubectl_exec_get_configmap, kubectl_exec_get_events,
kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output,
@@ -20,7 +17,7 @@ use thread::spawn;
use tracing::{span, Level};
use uuid::Uuid;
#[derive(Clone)]
#[derive(Clone, PartialEq, Eq)]
pub enum HelmAction {
Deploy,
Destroy,
@@ -108,6 +105,15 @@ impl ChartInfo {
}
}
pub fn new_from_release_name(name: &str, custom_namespace: &str) -> ChartInfo {
ChartInfo {
name: name.to_string(),
namespace: HelmChartNamespaces::Custom,
custom_namespace: Some(custom_namespace.to_string()),
..Default::default()
}
}
pub fn get_namespace_string(&self) -> String {
match self.namespace {
HelmChartNamespaces::Custom => self
@@ -130,7 +136,7 @@ impl Default for ChartInfo {
atomic: true,
force_upgrade: false,
last_breaking_version_requiring_restart: None,
timeout_in_seconds: 300,
timeout_in_seconds: 600,
dry_run: false,
wait: true,
values: Vec::new(),
@@ -216,36 +222,22 @@ pub trait HelmChart: Send {
) -> Result<Option<ChartPayload>, CommandError> {
let environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
let chart_info = self.get_chart_info();
let helm = Helm::new(kubernetes_config, &environment_variables).map_err(to_command_error)?;
match chart_info.action {
HelmAction::Deploy => {
if let Err(e) = helm_destroy_chart_if_breaking_changes_version_detected(
kubernetes_config,
&environment_variables,
chart_info,
) {
if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) {
warn!(
"error while trying to destroy chart if breaking change is detected: {:?}",
e.message()
e.to_string()
);
}
helm_exec_upgrade_with_chart_info(kubernetes_config, &environment_variables, chart_info)?
helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?;
}
HelmAction::Destroy => {
let chart_info = self.get_chart_info();
match is_chart_deployed(
kubernetes_config,
environment_variables.clone(),
Some(chart_info.get_namespace_string().as_str()),
chart_info.name.clone(),
) {
Ok(deployed) => {
if deployed {
helm_exec_uninstall_with_chart_info(kubernetes_config, &environment_variables, chart_info)?
}
}
Err(e) => return Err(e),
};
helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?;
}
HelmAction::Skip => {}
}
@@ -303,24 +295,31 @@ fn deploy_parallel_charts(
handles.push(handle);
}
let mut errors: Vec<Result<(), CommandError>> = vec![];
for handle in handles {
match handle.join() {
Ok(helm_run_ret) => {
if let Err(e) = helm_run_ret {
return Err(e);
errors.push(Err(e));
}
}
Err(e) => {
let safe_message = "Thread panicked during parallel charts deployments.";
return Err(CommandError::new(
let error = Err(CommandError::new(
format!("{}, error: {:?}", safe_message.to_string(), e),
Some(safe_message.to_string()),
));
errors.push(error);
}
}
}
Ok(())
if errors.is_empty() {
Ok(())
} else {
error!("Deployments of charts failed with: {:?}", errors);
errors.remove(0)
}
}
pub fn deploy_charts_levels(
@@ -330,24 +329,24 @@ pub fn deploy_charts_levels(
dry_run: bool,
) -> Result<(), CommandError> {
// first show diff
for level in &charts {
for chart in level {
let envs_ref: Vec<(&str, &str)> = envs.iter().map(|(x, y)| (x.as_str(), y.as_str())).collect();
let helm = Helm::new(&kubernetes_config, &envs_ref).map_err(to_command_error)?;
for level in charts {
// Show diff for all charts in this level
for chart in &level {
let chart_info = chart.get_chart_info();
match chart_info.action {
// don't do diff on destroy or skip
HelmAction::Deploy => {
let _ = helm_upgrade_diff_with_chart_info(&kubernetes_config, envs, chart.get_chart_info());
}
_ => {}
// don't do diff on destroy or skip
if chart_info.action == HelmAction::Deploy {
let _ = helm.upgrade_diff(chart_info, &vec![]);
}
}
}
// then apply
if dry_run {
return Ok(());
}
for level in charts.into_iter() {
// Skip actual deployment if dry run
if dry_run {
continue;
}
if let Err(e) = deploy_parallel_charts(&kubernetes_config, &envs, level) {
return Err(e);
}
@@ -591,47 +590,36 @@ impl HelmChart for PrometheusOperatorConfigChart {
) -> Result<Option<ChartPayload>, CommandError> {
let environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
let chart_info = self.get_chart_info();
let helm = Helm::new(kubernetes_config, &environment_variables).map_err(to_command_error)?;
match chart_info.action {
HelmAction::Deploy => {
if let Err(e) = helm_destroy_chart_if_breaking_changes_version_detected(
kubernetes_config,
&environment_variables,
chart_info,
) {
if let Err(e) = helm.uninstall_chart_if_breaking_version(chart_info, &vec![]) {
warn!(
"error while trying to destroy chart if breaking change is detected: {}",
e.message()
e.to_string()
);
}
helm_exec_upgrade_with_chart_info(kubernetes_config, &environment_variables, chart_info)?
helm.upgrade(&chart_info, &vec![]).map_err(to_command_error)?;
}
HelmAction::Destroy => {
let chart_info = self.get_chart_info();
match is_chart_deployed(
kubernetes_config,
environment_variables.clone(),
Some(chart_info.get_namespace_string().as_str()),
chart_info.name.clone(),
) {
Ok(deployed) => {
if deployed {
let prometheus_crds = [
"prometheuses.monitoring.coreos.com",
"prometheusrules.monitoring.coreos.com",
"servicemonitors.monitoring.coreos.com",
"podmonitors.monitoring.coreos.com",
"alertmanagers.monitoring.coreos.com",
"thanosrulers.monitoring.coreos.com",
];
helm_exec_uninstall_with_chart_info(kubernetes_config, &environment_variables, chart_info)?;
for crd in &prometheus_crds {
kubectl_exec_delete_crd(kubernetes_config, crd, environment_variables.clone())?;
}
}
if helm.check_release_exist(&chart_info, &vec![]).is_ok() {
helm.uninstall(&chart_info, &vec![]).map_err(to_command_error)?;
let prometheus_crds = [
"prometheuses.monitoring.coreos.com",
"prometheusrules.monitoring.coreos.com",
"servicemonitors.monitoring.coreos.com",
"podmonitors.monitoring.coreos.com",
"alertmanagers.monitoring.coreos.com",
"thanosrulers.monitoring.coreos.com",
];
for crd in &prometheus_crds {
let _ = kubectl_exec_delete_crd(kubernetes_config, crd, environment_variables.clone());
}
Err(e) => return Err(e),
};
}
}
HelmAction::Skip => {}
}
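
Taken together, here is a minimal sketch of how the new Helm wrapper and ChartInfo::new_from_release_name combine when cleaning up a namespace. Only Helm::new, list_release, uninstall and the constructor are taken from the changes above; the error handling and the empty environment-variable list are simplified for illustration:

use crate::cloud_provider::helm::ChartInfo;
use crate::cmd::helm::Helm;

// Sketch only: assumes it lives inside this crate and receives a valid kubeconfig path.
fn purge_namespace(kubeconfig_path: &str, namespace: &str) -> Result<(), String> {
    let helm = Helm::new(kubeconfig_path, &vec![]).map_err(|e| e.to_string())?;
    let releases = helm
        .list_release(Some(namespace), &vec![])
        .map_err(|e| e.to_string())?;
    for release in releases {
        // new_from_release_name targets a custom namespace and keeps the other ChartInfo defaults
        let chart = ChartInfo::new_from_release_name(&release.name, &release.namespace);
        helm.uninstall(&chart, &vec![]).map_err(|e| e.to_string())?;
    }
    Ok(())
}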

View File

@@ -34,7 +34,7 @@ use crate::fs::workspace_directory;
use crate::logger::{LogLevel, Logger};
use crate::models::ProgressLevel::Info;
use crate::models::{
Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier,
Action, Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, QoveryIdentifier, StringPath,
};
use crate::object_storage::ObjectStorage;
use crate::unit_conversion::{any_to_mi, cpu_string_to_float};
@@ -78,29 +78,65 @@ pub trait Kubernetes: Listen {
)
}
fn get_kubeconfig_filename(&self) -> String {
format!("{}.yaml", self.id())
}
fn get_kubeconfig_file(&self) -> Result<(String, File), EngineError> {
let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration));
let bucket_name = format!("qovery-kubeconfigs-{}", self.id());
let object_key = format!("{}.yaml", self.id());
let object_key = self.get_kubeconfig_filename();
let stage = Stage::General(GeneralStep::RetrieveClusterConfig);
let (string_path, file) = match self
.config_file_store()
.get(bucket_name.as_str(), object_key.as_str(), true)
{
Ok((path, file)) => (path, file),
Err(err) => {
let error = EngineError::new_cannot_retrieve_cluster_config_file(
self.get_event_details(stage),
CommandError::new_from_safe_message(
format!(
"Error getting file from store, error: {}",
err.message.unwrap_or_else(|| "no details.".to_string())
)
.to_string(),
),
);
self.logger().log(LogLevel::Error, EngineEvent::Error(error.clone()));
return Err(error);
// check if kubeconfig locally exists
let local_kubeconfig = match self.get_temp_dir(event_details) {
Ok(x) => {
let local_kubeconfig_folder_path = format!("{}/{}", &x, &bucket_name);
let local_kubeconfig_generated = format!("{}/{}", &local_kubeconfig_folder_path, &object_key);
if Path::new(&local_kubeconfig_generated).exists() {
match File::open(&local_kubeconfig_generated) {
Ok(_) => Some(local_kubeconfig_generated),
Err(_) => {
debug!("couldn't open {} file", &local_kubeconfig_generated);
None
}
}
} else {
None
}
}
Err(_) => None,
};
// otherwise, try to get it from object storage
let (string_path, file) = match local_kubeconfig {
Some(local_kubeconfig_generated) => {
let kubeconfig_file =
File::open(&local_kubeconfig_generated).expect("couldn't read kubeconfig file, but file exists");
(StringPath::from(&local_kubeconfig_generated), kubeconfig_file)
}
None => {
match self
.config_file_store()
.get(bucket_name.as_str(), object_key.as_str(), true)
{
Ok((path, file)) => (path, file),
Err(err) => {
let error = EngineError::new_cannot_retrieve_cluster_config_file(
self.get_event_details(stage),
CommandError::new_from_safe_message(
format!(
"Error getting file from store, error: {}",
err.message.unwrap_or_else(|| "no details.".to_string())
)
.to_string(),
),
);
self.logger().log(LogLevel::Error, EngineEvent::Error(error.clone()));
return Err(error);
}
}
}
};
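
The retrieval above now checks for a locally generated kubeconfig in the workspace before falling back to object storage. A self-contained sketch of just that lookup order, using the same path layout ({temp_dir}/qovery-kubeconfigs-{cluster_id}/{cluster_id}.yaml); the object-storage fallback stays with the caller:

use std::fs::File;
use std::path::PathBuf;

fn local_kubeconfig(temp_dir: &str, cluster_id: &str) -> Option<(PathBuf, File)> {
    let path = PathBuf::from(temp_dir)
        .join(format!("qovery-kubeconfigs-{}", cluster_id))
        .join(format!("{}.yaml", cluster_id));
    if path.exists() {
        // Prefer the locally generated kubeconfig when it can actually be opened...
        if let Ok(file) = File::open(&path) {
            return Some((path, file));
        }
    }
    // ...otherwise the caller downloads it from the object storage bucket.
    None
}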
@@ -233,11 +269,12 @@ pub trait Kubernetes: Listen {
where
Self: Sized,
{
let kubeconfig = match self.get_kubeconfig_file() {
Ok((path, _)) => path,
Err(e) => return Err(CommandError::new(e.message(), None)),
};
send_progress_on_long_task(self, Action::Create, || {
check_workers_status(
self.get_kubeconfig_file_path().expect("Unable to get Kubeconfig"),
self.cloud_provider().credentials_environment_variables(),
)
check_workers_status(&kubeconfig, self.cloud_provider().credentials_environment_variables())
})
}
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError>;
@@ -436,11 +473,29 @@ pub fn deploy_environment(
"deployment",
CheckAction::Deploy,
)?;
}
// Quick fix: adding 100 ms delay to avoid race condition on service status update
thread::sleep(std::time::Duration::from_millis(100));
// Quick fix: adding 100 ms delay to avoid race condition on service status update
thread::sleep(std::time::Duration::from_millis(100));
// check all deployed services
// check all deployed services
for service in &environment.stateful_services {
let _ = service::check_kubernetes_service_error(
service.exec_check_action(),
kubernetes,
service,
event_details.clone(),
&stateless_deployment_target,
&listeners_helper,
"check deployment",
CheckAction::Deploy,
)?;
}
// Quick fix: adding 100 ms delay to avoid race condition on service status update
thread::sleep(std::time::Duration::from_millis(100));
for service in &environment.stateless_services {
let _ = service::check_kubernetes_service_error(
service.exec_check_action(),
kubernetes,
@@ -1192,6 +1247,7 @@ impl NodeGroups {
Ok(NodeGroups {
name: group_name,
id: None,
min_nodes,
max_nodes,
instance_type,

View File

@@ -65,6 +65,7 @@ pub struct CpuLimits {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct NodeGroups {
pub name: String,
pub id: Option<String>,
pub min_nodes: i32,
pub max_nodes: i32,
pub instance_type: String,
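
Since id is optional and filled in later, when node groups are matched against the cloud provider API, existing call sites keep compiling through the constructor, which sets it to None, as the updated unit tests show. A small illustrative check reusing the arguments from those tests:

use crate::cloud_provider::models::NodeGroups;

#[test]
fn node_group_has_no_id_by_default() {
    // Arguments mirror the updated tests above; the meaning of the last
    // numeric argument is left to the constructor's definition.
    let group = NodeGroups::new("default".to_string(), 2, 2, "t2.large".to_string(), 20)
        .expect("node group definition should be valid");
    assert_eq!(group.id, None);
}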

View File

@@ -1,7 +1,6 @@
use crate::cloud_provider::helm::{
get_chart_for_shell_agent, get_engine_helm_action_from_location, ChartInfo, ChartSetValue, ChartValuesGenerated,
CommonChart, CoreDNSConfigChart, HelmAction, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart,
ShellAgentContext,
CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces, PrometheusOperatorConfigChart, ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::cloud_provider::scaleway::application::{ScwRegion, ScwZone};
@@ -283,6 +282,7 @@ pub fn scw_helm_charts(
},
};
/* Example to delete an old chart
let old_prometheus_operator = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
name: "prometheus-operator".to_string(),
@@ -290,7 +290,7 @@ pub fn scw_helm_charts(
action: HelmAction::Destroy,
..Default::default()
},
};
};*/
let kube_prometheus_stack = PrometheusOperatorConfigChart {
chart_info: ChartInfo {
@@ -493,7 +493,9 @@ datasources:
},
ChartSetValue {
key: "prometheus.servicemonitor.enabled".to_string(),
value: chart_config_prerequisites.ff_metrics_history_enabled.to_string(),
// Due to a dependency cycle, prometheus needs a tls certificate from cert manager, and enabling this
// would require prometheus to be already installed
value: "false".to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.prometheusInstance".to_string(),
@@ -519,11 +521,11 @@ datasources:
// Webhooks resources limits
ChartSetValue {
key: "webhook.resources.limits.cpu".to_string(),
value: "20m".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.cpu".to_string(),
value: "20m".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "webhook.resources.limits.memory".to_string(),
@@ -856,21 +858,19 @@ datasources:
};
// chart deployment order matters!!!
let level_1: Vec<Box<dyn HelmChart>> = vec![
Box::new(q_storage_class),
Box::new(coredns_config),
Box::new(old_prometheus_operator),
];
let level_1: Vec<Box<dyn HelmChart>> = vec![Box::new(q_storage_class), Box::new(coredns_config)];
let mut level_2: Vec<Box<dyn HelmChart>> = vec![];
let level_2: Vec<Box<dyn HelmChart>> = vec![Box::new(cert_manager)];
let mut level_3: Vec<Box<dyn HelmChart>> = vec![];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![Box::new(external_dns)];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![];
let mut level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress), Box::new(cert_manager)];
let mut level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(external_dns)];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![
let mut level_6: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress)];
let mut level_7: Vec<Box<dyn HelmChart>> = vec![
Box::new(cert_manager_config),
Box::new(qovery_agent),
Box::new(shell_agent),
@@ -879,24 +879,24 @@ datasources:
// // observability
if chart_config_prerequisites.ff_metrics_history_enabled {
level_2.push(Box::new(kube_prometheus_stack));
level_4.push(Box::new(prometheus_adapter));
level_4.push(Box::new(kube_state_metrics));
level_3.push(Box::new(kube_prometheus_stack));
level_5.push(Box::new(prometheus_adapter));
level_5.push(Box::new(kube_state_metrics));
}
if chart_config_prerequisites.ff_log_history_enabled {
level_3.push(Box::new(promtail));
level_4.push(Box::new(loki));
level_4.push(Box::new(promtail));
level_5.push(Box::new(loki));
}
if chart_config_prerequisites.ff_metrics_history_enabled || chart_config_prerequisites.ff_log_history_enabled {
level_6.push(Box::new(grafana))
level_7.push(Box::new(grafana))
};
// pleco
if !chart_config_prerequisites.disable_pleco {
level_5.push(Box::new(pleco));
level_6.push(Box::new(pleco));
}
info!("charts configuration preparation finished");
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6])
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6, level_7])
}

View File

@@ -3,7 +3,7 @@ pub mod node;
use crate::cloud_provider::aws::regions::AwsZones;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::deploy_charts_levels;
use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo};
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, send_progress_on_long_task, uninstall_cert_manager, Kind, Kubernetes,
KubernetesUpgradeStatus, ProviderOptions,
@@ -12,11 +12,11 @@ use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat};
use crate::cloud_provider::qovery::EngineLocation;
use crate::cloud_provider::scaleway::application::ScwZone;
use crate::cloud_provider::scaleway::kubernetes::helm_charts::{scw_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::scaleway::kubernetes::node::ScwInstancesType;
use crate::cloud_provider::scaleway::kubernetes::node::{ScwInstancesType, ScwNodeGroup};
use crate::cloud_provider::utilities::print_action;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd::helm::{to_engine_error, Helm};
use crate::cmd::kubectl::{kubectl_exec_api_custom_metrics, kubectl_exec_get_all_namespaces, kubectl_exec_get_events};
use crate::cmd::structs::HelmChart;
use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list};
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider::DnsProvider;
@@ -29,18 +29,32 @@ use crate::models::{
};
use crate::object_storage::scaleway_object_storage::{BucketDeleteStrategy, ScalewayOS};
use crate::object_storage::ObjectStorage;
use crate::runtime::block_on;
use crate::string::terraform_list_format;
use crate::{cmd, dns_provider};
use ::function_name::named;
use reqwest::StatusCode;
use retry::delay::{Fibonacci, Fixed};
use retry::Error::Operation;
use retry::OperationResult;
use scaleway_api_rs::apis::Error;
use scaleway_api_rs::models::ScalewayK8sV1Cluster;
use serde::{Deserialize, Serialize};
use std::env;
use std::path::Path;
use std::str::FromStr;
use tera::Context as TeraContext;
#[derive(PartialEq)]
pub enum ScwNodeGroupErrors {
CloudProviderApiError(CommandError),
ClusterDoesNotExists(CommandError),
MultipleClusterFound,
NoNodePoolFound(CommandError),
MissingNodePoolInfo,
NodeGroupValidationError(CommandError),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KapsuleOptions {
// Qovery
@@ -193,6 +207,210 @@ impl<'a> Kapsule<'a> {
})
}
fn get_configuration(&self) -> scaleway_api_rs::apis::configuration::Configuration {
scaleway_api_rs::apis::configuration::Configuration {
api_key: Some(scaleway_api_rs::apis::configuration::ApiKey {
key: self.options.scaleway_secret_key.clone(),
prefix: None,
}),
..scaleway_api_rs::apis::configuration::Configuration::default()
}
}
fn get_scw_cluster_info(&self) -> Result<Option<ScalewayK8sV1Cluster>, EngineError> {
let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration));
// get cluster info
let cluster_info = match block_on(scaleway_api_rs::apis::clusters_api::list_clusters(
&self.get_configuration(),
self.region().as_str(),
None,
Some(self.options.scaleway_project_id.as_str()),
None,
None,
None,
Some(self.cluster_name().as_str()),
None,
None,
)) {
Ok(x) => x,
Err(e) => {
let msg = format!("wasn't able to retrieve SCW cluster information from the API. {:?}", e);
return Err(EngineError::new_cannot_get_cluster_error(
event_details.clone(),
CommandError::new(msg.clone(), Some(msg)),
));
}
};
// if no cluster exists
let cluster_info_content = cluster_info.clusters.unwrap();
if &cluster_info_content.len() == &(0 as usize) {
return Ok(None);
} else if &cluster_info_content.len() != &(1 as usize) {
let msg = format!(
"too many clusters found with this name, where 1 was expected. {:?}",
&cluster_info_content.len()
);
return Err(EngineError::new_multiple_cluster_found_expected_one_error(
event_details,
CommandError::new(msg.clone(), Some(msg)),
));
}
Ok(Some(cluster_info_content[0].clone()))
}
fn get_existing_sanitized_node_groups(
&self,
cluster_info: ScalewayK8sV1Cluster,
) -> Result<Vec<ScwNodeGroup>, ScwNodeGroupErrors> {
let error_cluster_id = format!("expected cluster id for this Scaleway cluster");
let cluster_id = match cluster_info.id {
None => {
return Err(ScwNodeGroupErrors::NodeGroupValidationError(
CommandError::new_from_safe_message(error_cluster_id),
))
}
Some(x) => x,
};
let pools = match block_on(scaleway_api_rs::apis::pools_api::list_pools(
&self.get_configuration(),
self.region().as_str(),
cluster_id.as_str(),
None,
None,
None,
None,
None,
)) {
Ok(x) => x,
Err(e) => {
let msg = format!("error while trying to get SCW pool info from cluster {}", &cluster_id);
let msg_with_error = format!("{}. {:?}", msg.clone(), e);
return Err(ScwNodeGroupErrors::CloudProviderApiError(CommandError::new(
msg_with_error,
Some(msg),
)));
}
};
// ensure pools are present
if pools.pools.is_none() {
let msg = format!(
"No SCW pool found from the SCW API for cluster {}/{}",
&cluster_id,
&cluster_info.name.unwrap_or("unknown cluster".to_string())
);
return Err(ScwNodeGroupErrors::NoNodePoolFound(CommandError::new(
msg.clone(),
Some(msg),
)));
}
// create sanitized nodegroup pools
let mut nodegroup_pool: Vec<ScwNodeGroup> = Vec::with_capacity(pools.total_count.unwrap_or(0 as f32) as usize);
for ng in pools.pools.unwrap() {
if ng.id.is_none() {
let msg = format!(
"error while trying to validate SCW pool ID from cluster {}",
&cluster_id
);
return Err(ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new(
msg.clone(),
Some(msg),
)));
}
let ng_sanitized = self.get_node_group_info(ng.id.unwrap().as_str())?;
nodegroup_pool.push(ng_sanitized)
}
Ok(nodegroup_pool)
}
fn get_node_group_info(&self, pool_id: &str) -> Result<ScwNodeGroup, ScwNodeGroupErrors> {
let pool = match block_on(scaleway_api_rs::apis::pools_api::get_pool(
&self.get_configuration(),
self.region().as_str(),
pool_id,
)) {
Ok(x) => x,
Err(e) => {
return Err(match e {
Error::ResponseError(x) => {
let msg_with_error = format!(
"Error code while getting node group: {}, API message: {} ",
x.status, x.content
);
match x.status {
StatusCode::NOT_FOUND => ScwNodeGroupErrors::NoNodePoolFound(CommandError::new(
msg_with_error,
Some("No node pool found".to_string()),
)),
_ => ScwNodeGroupErrors::CloudProviderApiError(CommandError::new(
msg_with_error,
Some("Scaleway API error while trying to get node group".to_string()),
)),
}
}
_ => {
let msg = "This Scaleway API error is not supported in the engine, please add it to better support it".to_string();
ScwNodeGroupErrors::NodeGroupValidationError(CommandError::new(msg.clone(), Some(msg)))
}
})
}
};
// ensure there is no missing info
if let Err(e) = self.check_missing_nodegroup_info(&pool.name, "name") {
return Err(e);
};
if let Err(e) = self.check_missing_nodegroup_info(&pool.min_size, "min_size") {
return Err(e);
};
if let Err(e) = self.check_missing_nodegroup_info(&pool.max_size, "max_size") {
return Err(e);
};
if let Err(e) = self.check_missing_nodegroup_info(&pool.status, "status") {
return Err(e);
};
match ScwNodeGroup::new(
pool.id,
pool.name.unwrap(),
pool.min_size.unwrap() as i32,
pool.max_size.unwrap() as i32,
pool.node_type,
pool.size as i32,
pool.status.unwrap(),
) {
Ok(x) => Ok(x),
Err(e) => Err(ScwNodeGroupErrors::NodeGroupValidationError(e)),
}
}
fn check_missing_nodegroup_info<T>(&self, item: &Option<T>, name: &str) -> Result<(), ScwNodeGroupErrors> {
let event_details = self.get_event_details(Infrastructure(InfrastructureStep::LoadConfiguration));
self.logger.log(
LogLevel::Error,
EngineEvent::Error(EngineError::new_missing_workers_group_info_error(
event_details,
CommandError::new_from_safe_message(format!(
"Missing node pool info {} for cluster {}",
name,
self.context.cluster_id()
)),
)),
);
if item.is_none() {
return Err(ScwNodeGroupErrors::MissingNodePoolInfo);
};
Ok(())
}
fn kubeconfig_bucket_name(&self) -> String {
format!("qovery-kubeconfigs-{}", self.id())
}
@@ -256,7 +474,7 @@ impl<'a> Kapsule<'a> {
// Kubernetes
context.insert("test_cluster", &self.context.is_test_cluster());
context.insert("kubernetes_cluster_id", self.id());
context.insert("kubernetes_cluster_name", self.name());
context.insert("kubernetes_cluster_name", self.cluster_name().as_str());
context.insert("kubernetes_cluster_version", self.version());
// Qovery
@@ -579,6 +797,192 @@ impl<'a> Kapsule<'a> {
return Err(error);
}
let cluster_info = self.get_scw_cluster_info()?;
if cluster_info.is_none() {
let msg = "no cluster found from the Scaleway API".to_string();
return Err(EngineError::new_no_cluster_found_error(
event_details.clone(),
CommandError::new(msg.clone(), Some(msg)),
));
}
let current_nodegroups = match self
.get_existing_sanitized_node_groups(cluster_info.expect("A cluster should be present at this create stage"))
{
Ok(x) => x,
Err(e) => {
match e {
ScwNodeGroupErrors::CloudProviderApiError(c) => {
return Err(EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(c),
))
}
ScwNodeGroupErrors::ClusterDoesNotExists(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deploying(
event_details.clone(),
EventMessage::new_from_safe(
"cluster do not exists, no node groups can be retrieved for upgrade check".to_string(),
),
),
),
ScwNodeGroupErrors::MultipleClusterFound => {
let msg = "multiple clusters found, can't match the correct node groups".to_string();
return Err(EngineError::new_multiple_cluster_found_expected_one_error(
event_details.clone(),
CommandError::new(msg.clone(), Some(msg)),
));
}
ScwNodeGroupErrors::NoNodePoolFound(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deploying(
event_details.clone(),
EventMessage::new_from_safe(
"cluster exists, but no node groups found for upgrade check".to_string(),
),
),
),
ScwNodeGroupErrors::MissingNodePoolInfo => {
let msg = format!("Error with Scaleway API while trying to retrieve node pool info");
return Err(EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(CommandError::new_from_safe_message(msg)),
));
}
ScwNodeGroupErrors::NodeGroupValidationError(c) => {
return Err(EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(c),
));
}
};
Vec::with_capacity(0)
}
};
// ensure all node groups are in ready state Scaleway side
self.logger.log(
LogLevel::Info,
EngineEvent::Deploying(
event_details.clone(),
EventMessage::new_from_safe(
"ensuring all groups nodes are in ready state from the Scaleway API".to_string(),
),
),
);
for ng in current_nodegroups {
let res = retry::retry(
// retry 10 min max per nodegroup until they are ready
Fixed::from_millis(15000).take(40),
|| {
self.logger().log(
LogLevel::Info,
EngineEvent::Deploying(
event_details.clone(),
EventMessage::new_from_safe(format!(
"checking node group {}/{:?}, current status: {:?}",
&ng.name,
&ng.id.as_ref().unwrap_or(&"unknown".to_string()),
&ng.status
)),
),
);
let pool_id = match &ng.id {
None => {
let msg =
"node group id was expected to get info, but not found from Scaleway API".to_string();
return OperationResult::Retry(
EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(CommandError::new_from_safe_message(msg)),
),
);
}
Some(x) => x,
};
let scw_ng = match self.get_node_group_info(pool_id.as_str()) {
Ok(x) => x,
Err(e) => {
return match e {
ScwNodeGroupErrors::CloudProviderApiError(c) => {
let current_error = EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(c),
);
self.logger
.log(LogLevel::Error, EngineEvent::Error(current_error.clone()));
OperationResult::Retry(current_error)
}
ScwNodeGroupErrors::ClusterDoesNotExists(c) => {
let current_error =
EngineError::new_no_cluster_found_error(event_details.clone(), c);
self.logger
.log(LogLevel::Error, EngineEvent::Error(current_error.clone()));
OperationResult::Retry(current_error)
}
ScwNodeGroupErrors::MultipleClusterFound => {
OperationResult::Retry(EngineError::new_multiple_cluster_found_expected_one_error(
event_details.clone(),
CommandError::new_from_safe_message(
"Multiple cluster found while one was expected".to_string(),
),
))
}
ScwNodeGroupErrors::NoNodePoolFound(_) => OperationResult::Ok(()),
ScwNodeGroupErrors::MissingNodePoolInfo => {
OperationResult::Retry(EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
None,
))
}
ScwNodeGroupErrors::NodeGroupValidationError(c) => {
let current_error = EngineError::new_missing_api_info_from_cloud_provider_error(
event_details.clone(),
Some(c),
);
self.logger
.log(LogLevel::Error, EngineEvent::Error(current_error.clone()));
OperationResult::Retry(current_error)
}
}
}
};
match scw_ng.status == scaleway_api_rs::models::scaleway_k8s_v1_pool::Status::Ready {
true => OperationResult::Ok(()),
false => OperationResult::Retry(EngineError::new_k8s_node_not_ready(
event_details.clone(),
CommandError::new_from_safe_message(format!(
"waiting for node group {} to be ready, current status: {:?}",
&scw_ng.name, scw_ng.status
)),
)),
}
},
);
match res {
Ok(_) => {}
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(msg)) => {
return Err(EngineError::new_k8s_node_not_ready(
event_details.clone(),
CommandError::new(msg, Some("Waiting for too long worker nodes to be ready".to_string())),
))
}
}
}
self.logger.log(
LogLevel::Info,
EngineEvent::Deploying(
event_details.clone(),
EventMessage::new_from_safe(
"all node groups for this cluster are ready from cloud provider API".to_string(),
),
),
);
// ensure all nodes are ready on Kubernetes
match self.check_workers_on_create() {
Ok(_) => {
self.send_to_customer(
@@ -793,7 +1197,10 @@ impl<'a> Kapsule<'a> {
};
if tf_workers_resources.is_empty() {
return Err(EngineError::new_cluster_has_no_worker_nodes(event_details.clone()));
return Err(EngineError::new_cluster_has_no_worker_nodes(
event_details.clone(),
None,
));
}
let kubernetes_config_file_path = self.get_kubeconfig_file_path()?;
@@ -1090,15 +1497,14 @@ impl<'a> Kapsule<'a> {
);
// delete custom metrics api to avoid stale namespaces on deletion
let _ = cmd::helm::helm_uninstall_list(
let helm = Helm::new(
&kubernetes_config_file_path,
vec![HelmChart {
name: "metrics-server".to_string(),
namespace: "kube-system".to_string(),
version: None,
}],
self.cloud_provider().credentials_environment_variables(),
);
&self.cloud_provider.credentials_environment_variables(),
)
.map_err(|e| to_engine_error(&event_details, e))?;
let chart = ChartInfo::new_from_release_name("metrics-server", "kube-system");
helm.uninstall(&chart, &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
// required to avoid namespace stuck on deletion
uninstall_cert_manager(
@@ -1118,50 +1524,27 @@ impl<'a> Kapsule<'a> {
let qovery_namespaces = get_qovery_managed_namespaces();
for qovery_namespace in qovery_namespaces.iter() {
let charts_to_delete = cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
Some(qovery_namespace),
);
match charts_to_delete {
Ok(charts) => {
for chart in charts {
match cmd::helm::helm_exec_uninstall(
&kubernetes_config_file_path,
&chart.namespace,
&chart.name,
self.cloud_provider().credentials_environment_variables(),
) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`", chart.name);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, Some(e.message())),
),
)
}
}
}
}
Err(e) => {
if !(e.message().contains("not found")) {
let charts_to_delete = helm
.list_release(Some(qovery_namespace), &vec![])
.map_err(|e| to_engine_error(&event_details, e))?;
for chart in charts_to_delete {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!("Chart `{}` deleted", chart.name)),
),
),
Err(e) => {
let message_safe = format!("Can't delete chart `{}`", chart.name);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new_from_safe(format!(
"Can't delete the namespace {}",
qovery_namespace
)),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1216,18 +1599,11 @@ impl<'a> Kapsule<'a> {
),
);
match cmd::helm::helm_list(
&kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
None,
) {
match helm.list_release(None, &vec![]) {
Ok(helm_charts) => {
for chart in helm_charts {
match cmd::helm::helm_uninstall_list(
&kubernetes_config_file_path,
vec![chart.clone()],
self.cloud_provider().credentials_environment_variables(),
) {
let chart_info = ChartInfo::new_from_release_name(&chart.name, &chart.namespace);
match helm.uninstall(&chart_info, &vec![]) {
Ok(_) => self.logger().log(
LogLevel::Info,
EngineEvent::Deleting(
@@ -1236,12 +1612,12 @@ impl<'a> Kapsule<'a> {
),
),
Err(e) => {
let message_safe = format!("Error deleting chart `{}` deleted", chart.name);
let message_safe = format!("Error deleting chart `{}`: {}", chart.name, e);
self.logger().log(
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe, e.message),
EventMessage::new(message_safe, Some(e.to_string())),
),
)
}
@@ -1254,7 +1630,7 @@ impl<'a> Kapsule<'a> {
LogLevel::Error,
EngineEvent::Deleting(
event_details.clone(),
EventMessage::new(message_safe.to_string(), Some(e.message())),
EventMessage::new(message_safe.to_string(), Some(e.to_string())),
),
)
}

View File

@@ -88,6 +88,47 @@ impl FromStr for ScwInstancesType {
}
}
#[derive(Clone)]
pub struct ScwNodeGroup {
pub name: String,
pub id: Option<String>,
pub min_nodes: i32,
pub max_nodes: i32,
pub instance_type: String,
pub disk_size_in_gib: i32,
pub status: scaleway_api_rs::models::scaleway_k8s_v1_pool::Status,
}
impl ScwNodeGroup {
pub fn new(
id: Option<String>,
group_name: String,
min_nodes: i32,
max_nodes: i32,
instance_type: String,
disk_size_in_gib: i32,
status: scaleway_api_rs::models::scaleway_k8s_v1_pool::Status,
) -> Result<Self, CommandError> {
if min_nodes > max_nodes {
let msg = format!(
"The number of minimum nodes ({}) for group name {} is higher than maximum nodes ({})",
&group_name, &min_nodes, &max_nodes
);
return Err(CommandError::new_from_safe_message(msg));
}
Ok(ScwNodeGroup {
name: group_name,
id,
min_nodes,
max_nodes,
instance_type,
disk_size_in_gib,
status,
})
}
}
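A minimal usage sketch for the constructor above, assuming the generated scaleway_api_rs crate is available for the Status enum; the pool id and node counts are illustrative:
// Valid group: min_nodes <= max_nodes, so construction succeeds.
let pool = ScwNodeGroup::new(
    Some("11111111-2222-3333-4444-555555555555".to_string()), // hypothetical pool id
    "default".to_string(),
    3,
    10,
    "dev1-l".to_string(),
    20,
    scaleway_api_rs::models::scaleway_k8s_v1_pool::Status::Ready,
)
.expect("min_nodes <= max_nodes should be accepted");
assert_eq!(pool.max_nodes, 10);
// Invalid group: min_nodes > max_nodes is rejected with a CommandError.
assert!(ScwNodeGroup::new(
    None,
    "default".to_string(),
    5,
    2,
    "dev1-l".to_string(),
    20,
    scaleway_api_rs::models::scaleway_k8s_v1_pool::Status::Ready,
)
.is_err());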
#[cfg(test)]
mod tests {
#[cfg(test)]
@@ -104,6 +145,7 @@ mod tests {
NodeGroups::new("".to_string(), 2, 2, "dev1-l".to_string(), 20).unwrap(),
NodeGroups {
name: "".to_string(),
id: None,
min_nodes: 2,
max_nodes: 2,
instance_type: "dev1-l".to_string(),

View File

@@ -1,5 +1,6 @@
use tera::Context as TeraContext;
use crate::cloud_provider::helm::ChartInfo;
use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate};
use crate::cloud_provider::service::{
default_tera_context, delete_router, deploy_stateless_service_error, send_progress_on_long_task, Action, Create,
@@ -7,8 +8,9 @@ use crate::cloud_provider::service::{
};
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm;
use crate::cmd::helm::Timeout;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::error::{EngineError, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners};
@@ -293,25 +295,26 @@ impl Create for Router {
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
let helm = helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
let chart = ChartInfo::new_from_custom_namespace(
helm_release_name,
workspace_dir.clone(),
environment.namespace().to_string(),
600_i64,
match self.service_type() {
ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)],
_ => vec![],
},
false,
self.selector(),
);
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
}
Ok(())
helm.upgrade(&chart, &vec![])
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())
}
fn on_create_check(&self) -> Result<(), EngineError> {

View File

@@ -8,9 +8,12 @@ use tera::Context as TeraContext;
use crate::build_platform::Image;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::helm::ChartInfo;
use crate::cloud_provider::kubernetes::Kubernetes;
use crate::cloud_provider::utilities::check_domain_for;
use crate::cloud_provider::DeploymentTarget;
use crate::cmd;
use crate::cmd::helm;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::Statefulset;
use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replicas_by_selector, ScalingKind};
@@ -365,30 +368,11 @@ pub fn deploy_user_stateless_service<T>(target: &DeploymentTarget, service: &T)
where
T: Service + Helm,
{
deploy_stateless_service(
target,
service,
service.engine_error(
EngineErrorCause::User(
"Your application has failed to start. \
Ensure you can run it without issues with `qovery run` and check its logs from the web interface or the CLI with `qovery log`. \
This issue often occurs due to ports misconfiguration. Make sure you exposed the correct port (using EXPOSE statement in Dockerfile or via Qovery configuration).",
),
format!(
"{} {} has failed to start ",
service.service_type().name(),
service.name_with_id()
),
),
)
deploy_stateless_service(target, service)
}
/// deploy a stateless service (app, router, database...) on Kubernetes
pub fn deploy_stateless_service<T>(
target: &DeploymentTarget,
service: &T,
thrown_error: EngineError,
) -> Result<(), EngineError>
pub fn deploy_stateless_service<T>(target: &DeploymentTarget, service: &T) -> Result<(), EngineError>
where
T: Service + Helm,
{
@@ -441,26 +425,26 @@ where
})?;
// do exec helm upgrade and return the last deployment status
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
service.selector(),
workspace_dir.as_str(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
let helm = helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error())?;
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
let chart = ChartInfo::new_from_custom_namespace(
helm_release_name,
workspace_dir.clone(),
environment.namespace().to_string(),
600_i64,
match service.service_type() {
ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)],
_ => vec![],
},
false,
service.selector(),
);
// check deployment status
if helm_history_row.is_none()
|| !helm_history_row
.expect("Error getting helm history row")
.is_successfully_deployed()
{
return Err(thrown_error);
}
helm.upgrade(&chart, &vec![])
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry(
kubernetes_config_file_path.as_str(),
@@ -482,48 +466,12 @@ where
}
/// do specific operations on a stateless service deployment error
pub fn deploy_stateless_service_error<T>(target: &DeploymentTarget, service: &T) -> Result<(), EngineError>
pub fn deploy_stateless_service_error<T>(_target: &DeploymentTarget, _service: &T) -> Result<(), EngineError>
where
T: Service + Helm,
{
let kubernetes = target.kubernetes;
let environment = target.environment;
let helm_release_name = service.helm_release_name();
let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
Ok(path) => path,
Err(e) => return Err(e.to_legacy_engine_error()),
};
let history_rows = crate::cmd::helm::helm_exec_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_history_error(
event_details.clone(),
helm_release_name.to_string(),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?;
if history_rows.len() == 1 {
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_uninstall_error(event_details.clone(), helm_release_name.to_string(), e)
.to_legacy_engine_error()
})?;
}
// Nothing to do, as we set --atomic on the chart releases we deploy,
// so Helm rolls back for us if a deployment fails.
Ok(())
}
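Because charts are released with --atomic, a failed helm upgrade is rolled back by Helm itself, so callers only surface the error. A minimal sketch of the calling pattern under that assumption, reusing names introduced by this commit (the release name and namespace are hypothetical):
let chart = ChartInfo::new_from_release_name("my-app", "my-namespace"); // hypothetical release
// With --atomic, a failed upgrade leaves the previous revision in place,
// so there is no manual uninstall or rollback step here.
helm.upgrade(&chart, &vec![])
    .map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;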
@@ -789,30 +737,26 @@ where
})?;
// do exec helm upgrade and return the last deployment status
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.to_string(),
environment.namespace(),
service.helm_release_name().as_str(),
service.selector(),
workspace_dir.to_string(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
let helm = helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
let chart = ChartInfo::new_from_custom_namespace(
service.helm_release_name(),
workspace_dir.clone(),
environment.namespace().to_string(),
600_i64,
match service.service_type() {
ServiceType::Database(_) => vec![format!("{}/q-values.yaml", &workspace_dir)],
_ => vec![],
},
false,
service.selector(),
);
// check deployment status
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(service.engine_error(
EngineErrorCause::Internal,
format!(
"{} service fails to be deployed (before start)",
service.service_type().name()
),
));
}
helm.upgrade(&chart, &vec![])
.map_err(|e| helm::to_engine_error(&event_details, e).to_legacy_engine_error())?;
// check app status
match crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry(
@@ -1306,34 +1250,15 @@ pub fn helm_uninstall_release(
.get_kubeconfig_file_path()
.map_err(|e| e.to_legacy_engine_error())?;
let history_rows = crate::cmd::helm::helm_exec_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
let helm = cmd::helm::Helm::new(
&kubernetes_config_file_path,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_history(event_details.clone(), environment.namespace().to_string(), e)
.to_legacy_engine_error()
})?;
.map_err(|e| NewEngineError::new_helm_error(event_details.clone(), e).to_legacy_engine_error())?;
// if there is no valid history - then delete the helm chart
let first_valid_history_row = history_rows.iter().find(|x| x.is_successfully_deployed());
if first_valid_history_row.is_some() {
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_uninstall_error(event_details.clone(), helm_release_name.to_string(), e)
.to_legacy_engine_error()
})?;
}
Ok(())
let chart = ChartInfo::new_from_release_name(helm_release_name, environment.namespace());
helm.uninstall(&chart, &vec![])
.map_err(|e| NewEngineError::new_helm_error(event_details.clone(), e).to_legacy_engine_error())
}
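Several call sites in this file now follow the same wrapper pattern instead of driving the helm CLI helpers directly. A condensed sketch of that pattern, assuming the Helm and ChartInfo types from this commit are in scope and that kubernetes/event_details are available as in the surrounding functions (the kubeconfig path and release name are illustrative):
let helm = cmd::helm::Helm::new(
    "/tmp/kubeconfig", // illustrative kubeconfig path
    &kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| NewEngineError::new_helm_error(event_details.clone(), e).to_legacy_engine_error())?;
// A ChartInfo built from a release name targets an existing release in a given namespace.
let chart = ChartInfo::new_from_release_name("my-release", "my-namespace"); // hypothetical release
helm.uninstall(&chart, &vec![])
    .map_err(|e| NewEngineError::new_helm_error(event_details.clone(), e).to_legacy_engine_error())?;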
/// This function call (start|pause|delete)_in_progress function every 10 seconds when a

View File

@@ -319,16 +319,23 @@ impl fmt::Display for VersionsNumber {
}
}
fn cloudflare_dns_resolver() -> Resolver {
fn google_dns_resolver() -> Resolver {
let mut resolver_options = ResolverOpts::default();
// We want to avoid caching and relying on the host's hosts file, as some providers force caching,
// which leads to stale responses
resolver_options.cache_size = 0;
resolver_options.use_hosts_file = false;
resolver_options.use_hosts_file = true;
//resolver_options.ip_strategy = LookupIpStrategy::Ipv4Only;
//let dns = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 254));
//let resolver = ResolverConfig::from_parts(
// None,
// vec![],
// NameServerConfigGroup::from_ips_clear(&vec![dns], 53, true),
//);
Resolver::new(ResolverConfig::cloudflare(), resolver_options)
.expect("Invalid cloudflare DNS resolver configuration")
//Resolver::new(resolver, resolver_options).unwrap()
Resolver::new(ResolverConfig::google(), resolver_options).expect("Invalid google DNS resolver configuration")
}
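A small standalone sketch of how the resolver above can be exercised, assuming the trust-dns-resolver crate (which the unqualified Resolver/ResolverConfig/ResolverOpts types appear to come from); the domain is illustrative:
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
use trust_dns_resolver::proto::rr::RecordType;
use trust_dns_resolver::Resolver;
fn main() {
    let mut opts = ResolverOpts::default();
    opts.cache_size = 0; // bypass the resolver cache so checks always see fresh records
    let resolver =
        Resolver::new(ResolverConfig::google(), opts).expect("Invalid google DNS resolver configuration");
    // Look up the CNAME target of a domain, as check_cname_for does below.
    match resolver.lookup("ci-test-no-delete.qovery.io", RecordType::CNAME) {
        Ok(lookup) => println!("records: {:?}", lookup.iter().collect::<Vec<_>>()),
        Err(e) => println!("no CNAME record yet: {}", e),
    }
}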
fn get_cname_record_value(resolver: &Resolver, cname: &str) -> Option<String> {
@@ -352,7 +359,7 @@ pub fn check_cname_for(
cname_to_check: &str,
execution_id: &str,
) -> Result<String, String> {
let resolver = cloudflare_dns_resolver();
let resolver = google_dns_resolver();
let listener_helper = ListenersHelper::new(listeners);
let send_deployment_progress = |msg: &str| {
@@ -420,7 +427,7 @@ pub fn check_domain_for(
execution_id: &str,
context_id: &str,
) -> Result<(), EngineError> {
let resolver = cloudflare_dns_resolver();
let resolver = google_dns_resolver();
for domain in domains_to_check {
listener_helper.deployment_in_progress(ProgressInfo::new(
@@ -578,7 +585,7 @@ pub fn print_action(cloud_provider_name: &str, struct_name: &str, fn_name: &str,
mod tests {
use crate::cloud_provider::models::CpuLimits;
use crate::cloud_provider::utilities::{
cloudflare_dns_resolver, convert_k8s_cpu_value_to_f32, get_cname_record_value,
convert_k8s_cpu_value_to_f32, get_cname_record_value, google_dns_resolver,
validate_k8s_required_cpu_and_burstable, VersionsNumber,
};
use crate::error::StringError;
@@ -626,7 +633,7 @@ mod tests {
#[test]
pub fn test_cname_resolution() {
let resolver = cloudflare_dns_resolver();
let resolver = google_dns_resolver();
let cname = get_cname_record_value(&resolver, "ci-test-no-delete.qovery.io");
assert_eq!(cname, Some(String::from("qovery.io.")));

File diff suppressed because it is too large

View File

@@ -76,6 +76,9 @@ pub enum Tag {
CannotGetCluster,
ObjectStorageCannotCreateBucket,
ObjectStorageCannotPutFileIntoBucket,
NoClusterFound,
OnlyOneClusterExpected,
CloudProviderApiMissingInfo,
}
impl From<errors::Tag> for Tag {
@@ -139,6 +142,9 @@ impl From<errors::Tag> for Tag {
errors::Tag::UnsupportedZone => Tag::UnsupportedZone,
errors::Tag::K8sNodeIsNotReadyWithTheRequestedVersion => Tag::K8sNodeIsNotReadyWithTheRequestedVersion,
errors::Tag::K8sNodeIsNotReady => Tag::K8sNodeIsNotReady,
errors::Tag::NoClusterFound => Tag::NoClusterFound,
errors::Tag::OnlyOneClusterExpected => Tag::OnlyOneClusterExpected,
errors::Tag::CloudProviderApiMissingInfo => Tag::CloudProviderApiMissingInfo,
}
}
}

View File

@@ -3,6 +3,7 @@ pub mod io;
extern crate url;
use crate::cloud_provider::utilities::VersionsNumber;
use crate::cmd::helm::HelmError;
use crate::error::{EngineError as LegacyEngineError, EngineErrorCause, EngineErrorScope};
use crate::events::EventDetails;
use url::Url;
@@ -88,6 +89,8 @@ pub enum Tag {
Unknown,
/// MissingRequiredEnvVariable: represents an error where a required env variable is not set.
MissingRequiredEnvVariable,
/// NoClusterFound: represents an error where no cluster was found
NoClusterFound,
/// ClusterHasNoWorkerNodes: represents an error where the current cluster doesn't have any worker nodes.
ClusterHasNoWorkerNodes,
/// CannotGetWorkspaceDirectory: represents an error while trying to get workspace directory.
@@ -188,10 +191,14 @@ pub enum Tag {
CannotGetSupportedVersions,
/// CannotGetCluster: represents an error where we cannot get cluster.
CannotGetCluster,
/// OnlyOneClusterExpected: represents an error where only one cluster was expected but several were found
OnlyOneClusterExpected,
/// ObjectStorageCannotCreateBucket: represents an error while trying to create a new object storage bucket.
ObjectStorageCannotCreateBucket,
/// ObjectStorageCannotPutFileIntoBucket: represents an error while trying to put a file into an object storage bucket.
ObjectStorageCannotPutFileIntoBucket,
/// CloudProviderApiMissingInfo: represents an error where mandatory info is missing from the cloud provider API response
CloudProviderApiMissingInfo,
}
#[derive(Clone, Debug)]
@@ -353,14 +360,18 @@ impl EngineError {
/// Arguments:
///
/// * `event_details`: Error linked event details.
pub fn new_cluster_has_no_worker_nodes(event_details: EventDetails) -> EngineError {
/// * `raw_error`: Raw error message.
pub fn new_cluster_has_no_worker_nodes(
event_details: EventDetails,
raw_error: Option<CommandError>,
) -> EngineError {
let message = "No worker nodes present, can't proceed with operation.";
EngineError::new(
event_details,
Tag::ClusterHasNoWorkerNodes,
message.to_string(),
message.to_string(),
None,
raw_error,
None,
Some(
"This can happen if there where a manual operations on the workers or the infrastructure is paused."
@@ -369,6 +380,32 @@ impl EngineError {
)
}
/// Missing API info from the Cloud provider itself.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `raw_error`: Raw error message.
pub fn new_missing_api_info_from_cloud_provider_error(
event_details: EventDetails,
raw_error: Option<CommandError>,
) -> EngineError {
let message = "Error, missing required information from the Cloud Provider API";
EngineError::new(
event_details,
Tag::CloudProviderApiMissingInfo,
message.to_string(),
message.to_string(),
raw_error,
None,
Some(
"This can happen if the cloud provider is encountering issues. You should try again later".to_string(),
),
)
}
/// Creates new error for unsupported instance type.
///
/// Cloud provider doesn't support the requested instance type.
@@ -1467,6 +1504,29 @@ impl EngineError {
)
}
/// Creates new error from a Helm error
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `error`: Raw error message.
pub fn new_helm_error(event_details: EventDetails, error: HelmError) -> EngineError {
let cmd_error = match &error {
HelmError::CmdError(_, _, cmd_error) => Some(cmd_error.clone()),
_ => None,
};
EngineError::new(
event_details,
Tag::HelmChartUninstallError,
error.to_string(),
error.to_string(),
cmd_error,
None,
None,
)
}
/// Creates new error while uninstalling Helm chart.
///
/// Arguments:
@@ -1678,4 +1738,67 @@ impl EngineError {
Some("Maybe there is a lag and cluster is not yet reported, please retry later.".to_string()),
)
}
/// Creates new error when workers group info cannot be retrieved for the cluster.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `raw_error`: Raw error message.
pub fn new_missing_workers_group_info_error(event_details: EventDetails, raw_error: CommandError) -> EngineError {
let message = "Error, cannot get cluster.";
EngineError::new(
event_details,
Tag::CannotGetCluster,
message.to_string(),
message.to_string(),
Some(raw_error),
None,
Some("Maybe there is a lag and cluster is not yet reported, please retry later.".to_string()),
)
}
/// No cluster found
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `raw_error`: Raw error message.
pub fn new_no_cluster_found_error(event_details: EventDetails, raw_error: CommandError) -> EngineError {
let message = "Error, no cluster found.";
EngineError::new(
event_details,
Tag::CannotGetCluster,
message.to_string(),
message.to_string(),
Some(raw_error),
None,
Some("Maybe there is a lag and cluster is not yet reported, please retry later.".to_string()),
)
}
/// Too many clusters found, while only one was expected
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `raw_error`: Raw error message.
pub fn new_multiple_cluster_found_expected_one_error(
event_details: EventDetails,
raw_error: CommandError,
) -> EngineError {
let message = "Too many clusters found with this name, where 1 was expected";
EngineError::new(
event_details,
Tag::OnlyOneClusterExpected,
message.to_string(),
message.to_string(),
Some(raw_error),
None,
Some("Please contact Qovery support for investigation.".to_string()),
)
}
}

View File

@@ -102,7 +102,7 @@ fn test_build_cache() {
.as_str(),
);
let mut environment = test_utilities::common::working_minimal_environment(
let environment = test_utilities::common::working_minimal_environment(
&context,
secrets
.DEFAULT_TEST_DOMAIN
@@ -118,9 +118,9 @@ fn test_build_cache() {
let app_build = app.to_build();
let _ = match local_docker.has_cache(&app_build) {
Ok(CacheResult::Hit) => assert!(false),
Ok(CacheResult::Miss(parent_build)) => assert!(true),
Ok(CacheResult::Miss(_)) => assert!(true),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let _ = match ecr.pull(&image).unwrap() {
@@ -147,9 +147,9 @@ fn test_build_cache() {
let _ = match local_docker.has_cache(&build_result.build) {
Ok(CacheResult::Hit) => assert!(true),
Ok(CacheResult::Miss(parent_build)) => assert!(false),
Ok(CacheResult::Miss(_)) => assert!(false),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let start_pull_time = SystemTime::now();
@@ -1111,6 +1111,10 @@ fn deploy_a_non_working_environment_with_a_working_failover_on_aws_eks() {
fn deploy_2_non_working_environments_with_2_working_failovers_on_aws_eks() {
init();
let test_name = function_name!();
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let logger = logger();
let secrets = FuncTestsSecrets::new();

View File

@@ -104,7 +104,7 @@ fn test_build_cache() {
.expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"),
);
let mut environment = test_utilities::common::working_minimal_environment(
let environment = test_utilities::common::working_minimal_environment(
&context,
secrets
.DEFAULT_TEST_DOMAIN
@@ -120,9 +120,9 @@ fn test_build_cache() {
let app_build = app.to_build();
let _ = match local_docker.has_cache(&app_build) {
Ok(CacheResult::Hit) => assert!(false),
Ok(CacheResult::Miss(parent_build)) => assert!(true),
Ok(CacheResult::Miss(_)) => assert!(true),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let _ = match docr.pull(&image).unwrap() {
@@ -149,9 +149,9 @@ fn test_build_cache() {
let _ = match local_docker.has_cache(&build_result.build) {
Ok(CacheResult::Hit) => assert!(true),
Ok(CacheResult::Miss(parent_build)) => assert!(false),
Ok(CacheResult::Miss(_)) => assert!(false),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let start_pull_time = SystemTime::now();

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: nginx
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "toto.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "toto.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "toto.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "toto.labels" -}}
helm.sh/chart: {{ include "toto.chart" . }}
{{ include "toto.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "toto.selectorLabels" -}}
app.kubernetes.io/name: {{ include "toto.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "toto.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "toto.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "toto.fullname" . }}
labels:
{{- include "toto.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "toto.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "toto.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "toto.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
initialDelaySeconds: {{ .Values.initialDelaySeconds }}
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "toto.fullname" . }}
labels:
{{- include "toto.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "toto.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "toto.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "toto.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "toto.fullname" . }}
labels:
{{- include "toto.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "toto.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "toto.serviceAccountName" . }}
labels:
{{- include "toto.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "toto.fullname" . }}-test-connection"
labels:
{{- include "toto.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "toto.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@@ -0,0 +1,83 @@
# Default values for toto.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
initialDelaySeconds: 5
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -109,7 +109,7 @@ fn test_build_cache() {
.as_str(),
);
let mut environment = test_utilities::common::working_minimal_environment(
let environment = test_utilities::common::working_minimal_environment(
&context,
secrets
.DEFAULT_TEST_DOMAIN
@@ -125,9 +125,9 @@ fn test_build_cache() {
let app_build = app.to_build();
let _ = match local_docker.has_cache(&app_build) {
Ok(CacheResult::Hit) => assert!(false),
Ok(CacheResult::Miss(parent_build)) => assert!(true),
Ok(CacheResult::Miss(_)) => assert!(true),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let _ = match scr.pull(&image).unwrap() {
@@ -154,9 +154,9 @@ fn test_build_cache() {
let _ = match local_docker.has_cache(&build_result.build) {
Ok(CacheResult::Hit) => assert!(true),
Ok(CacheResult::Miss(parent_build)) => assert!(false),
Ok(CacheResult::Miss(_)) => assert!(false),
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
Err(err) => assert!(false),
Err(_) => assert!(false),
};
let start_pull_time = SystemTime::now();