feat: infra migration to new logging part 2 (#583)

Ticket: ENG-1050
This commit is contained in:
Benjamin
2022-02-09 14:20:59 +01:00
committed by GitHub
parent 7a14d0327b
commit a819ec53c2
54 changed files with 5442 additions and 3008 deletions

View File

@@ -15,7 +15,7 @@ use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset};
use crate::error::EngineErrorCause::Internal;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port};
use ::function_name::named;
@@ -368,6 +368,7 @@ impl Pause for Application {
impl Delete for Application {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -376,7 +377,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, false)
delete_stateless_service(target, self, false, event_details.clone())
})
}
@@ -386,6 +387,7 @@ impl Delete for Application {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -394,7 +396,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, true)
delete_stateless_service(target, self, true, event_details.clone())
})
}
}

View File

@@ -15,7 +15,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -281,6 +281,7 @@ impl Terraform for MongoDB {
impl Create for MongoDB {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -289,7 +290,7 @@ impl Create for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -344,6 +345,7 @@ impl Pause for MongoDB {
impl Delete for MongoDB {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -352,7 +354,7 @@ impl Delete for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -16,7 +16,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, DatabaseKind, Listen, Listener, Listeners};
use ::function_name::named;
@@ -292,6 +292,7 @@ impl Terraform for MySQL {
impl Create for MySQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -300,7 +301,7 @@ impl Create for MySQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -355,6 +356,7 @@ impl Pause for MySQL {
impl Delete for MySQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -363,7 +365,7 @@ impl Delete for MySQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -16,7 +16,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -279,6 +279,7 @@ impl Terraform for PostgreSQL {
impl Create for PostgreSQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -287,7 +288,7 @@ impl Create for PostgreSQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -343,6 +344,7 @@ impl Pause for PostgreSQL {
impl Delete for PostgreSQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -351,7 +353,7 @@ impl Delete for PostgreSQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -13,7 +13,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -291,6 +291,7 @@ impl Terraform for Redis {
impl Create for Redis {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -299,7 +300,7 @@ impl Create for Redis {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -354,6 +355,7 @@ impl Pause for Redis {
impl Delete for Redis {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -362,7 +364,7 @@ impl Delete for Redis {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -6,7 +6,7 @@ use crate::cloud_provider::helm::{
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::cmd::kubectl::{kubectl_delete_crash_looping_pods, kubectl_exec_get_daemonset, kubectl_exec_with_output};
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fs::File;
@@ -62,14 +62,16 @@ pub fn aws_helm_charts(
chart_prefix_path: Option<&str>,
kubernetes_config: &Path,
envs: &[(String, String)],
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
info!("preparing chart configuration to be deployed");
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, CommandError> {
let content_file = match File::open(&qovery_terraform_config_file) {
Ok(x) => x,
Err(e) => return Err(SimpleError{ kind: SimpleErrorKind::Other, message: Some(
format!("Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?. {:?}", e)
)}),
Err(e) => {
let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?";
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
let chart_prefix = chart_prefix_path.unwrap_or("./");
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
@@ -77,14 +79,14 @@ pub fn aws_helm_charts(
let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
error!(
"error while parsing terraform config file {}: {:?}",
&qovery_terraform_config_file, &e
let message_safe = format!(
"Error while parsing terraform config file {}",
qovery_terraform_config_file
);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("{:?}", e)),
});
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
@@ -968,22 +970,13 @@ datasources:
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let qovery_agent_version: QoveryAgent = match get_qovery_app_version(
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,
&chart_config_prerequisites.infra_options.agent_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery agent version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let mut qovery_agent = CommonChart {
chart_info: ChartInfo {
name: "qovery-agent".to_string(),
@@ -1042,6 +1035,7 @@ datasources:
..Default::default()
},
};
if chart_config_prerequisites.ff_log_history_enabled {
qovery_agent.chart_info.values.push(ChartSetValue {
key: "environmentVariables.FEATURES".to_string(),
@@ -1049,22 +1043,13 @@ datasources:
})
}
let qovery_engine_version: QoveryEngine = match get_qovery_app_version(
let qovery_engine_version: QoveryEngine = get_qovery_app_version(
QoveryAppName::Engine,
&chart_config_prerequisites.infra_options.engine_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery engine version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let qovery_engine = CommonChart {
chart_info: ChartInfo {
name: "qovery-engine".to_string(),
@@ -1235,7 +1220,7 @@ impl HelmChart for AwsVpcCniChart {
kubernetes_config: &Path,
envs: &[(String, String)],
_payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let kinds = vec!["daemonSet", "clusterRole", "clusterRoleBinding", "serviceAccount"];
let mut environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
environment_variables.push(("KUBECONFIG", kubernetes_config.to_str().unwrap()));
@@ -1254,72 +1239,77 @@ impl HelmChart for AwsVpcCniChart {
match self.enable_cni_managed_by_helm(kubernetes_config, envs) {
true => {
info!("Enabling AWS CNI support by Helm");
for kind in kinds {
info!("setting annotations and labels on {}/aws-node", &kind);
let steps = || -> Result<(), SimpleError> {
// Setting annotations and labels on kind/aws-node
let steps = || -> Result<(), CommandError> {
let label = format!("meta.helm.sh/release-name={}", self.chart_info.name);
let args = vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
label.as_str(),
];
let mut stdout = "".to_string();
let mut stderr = "".to_string();
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
args.clone(),
environment_variables.clone(),
|_| {},
|_| {},
|out| stdout = format!("{}\n{}", stdout, out),
|out| stderr = format!("{}\n{}", stderr, out),
)?;
let args = vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
"meta.helm.sh/release-namespace=kube-system",
];
let mut stdout = "".to_string();
let mut stderr = "".to_string();
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
kind,
"aws-node",
"meta.helm.sh/release-namespace=kube-system",
],
args.clone(),
environment_variables.clone(),
|_| {},
|_| {},
|out| stdout = format!("{}\n{}", stdout, out),
|out| stderr = format!("{}\n{}", stderr, out),
)?;
let args = vec![
"-n",
"kube-system",
"label",
"--overwrite",
kind,
"aws-node",
"app.kubernetes.io/managed-by=Helm",
];
let mut stdout = "".to_string();
let mut stderr = "".to_string();
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
kind,
"aws-node",
"app.kubernetes.io/managed-by=Helm",
],
args.clone(),
environment_variables.clone(),
|_| {},
|_| {},
|out| stdout = format!("{}\n{}", stdout, out),
|out| stderr = format!("{}\n{}", stderr, out),
)?;
Ok(())
};
if let Err(e) = steps() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while adding annotations for AWS VPC CNI. {:?}",
e.message
)),
});
}
steps()?;
}
info!("AWS CNI successfully deployed");
// sleep in order to be sure the daemonset is updated
sleep(Duration::from_secs(30))
}
false => info!("AWS CNI is already supported by Helm, nothing to do"),
false => {} // AWS CNI is already supported by Helm, nothing to do
};
Ok(None)
@@ -1327,12 +1317,12 @@ impl HelmChart for AwsVpcCniChart {
}
impl AwsVpcCniChart {
// this is required to know if we need to keep old annotation/labels values or not
/// this is required to know if we need to keep old annotation/labels values or not
fn is_cni_old_installed_version(
&self,
kubernetes_config: &Path,
envs: &[(String, String)],
) -> Result<bool, SimpleError> {
) -> Result<bool, CommandError> {
let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
match kubectl_exec_get_daemonset(
@@ -1342,32 +1332,27 @@ impl AwsVpcCniChart {
None,
environment_variables,
) {
Ok(x) => {
if x.spec.is_none() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"spec was not found in json output while looking at daemonset {}",
&self.chart_info.name
)),
});
Ok(x) => match x.spec {
None => {
return Err(CommandError::new_from_safe_message(format!(
"Spec was not found in json output while looking at daemonset {}",
&self.chart_info.name
)))
}
match x.spec.unwrap().selector.match_labels.k8s_app {
Some(spec) => match spec.selector.match_labels.k8s_app {
Some(x) if x == "aws-node" => Ok(true),
_ => Ok(false),
}
}
},
},
Err(e) => {
let msg = format!(
"error while getting daemonset info for chart {}, won't deploy CNI chart. {:?}",
&self.chart_info.name, e
let message_safe = format!(
"Error while getting daemonset info for chart {}, won't deploy CNI chart.",
&self.chart_info.name
);
error!("{}", &msg);
Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
})
Err(CommandError::new(
format!("{}, error: {:?}", message_safe, e),
Some(message_safe),
))
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,5 @@
use crate::cloud_provider::kubernetes::InstanceType;
use crate::errors::CommandError;
use core::fmt;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
@@ -54,9 +55,9 @@ impl fmt::Display for AwsInstancesType {
}
impl FromStr for AwsInstancesType {
type Err = ();
type Err = CommandError;
fn from_str(s: &str) -> Result<AwsInstancesType, ()> {
fn from_str(s: &str) -> Result<AwsInstancesType, CommandError> {
match s {
"t2.large" => Ok(AwsInstancesType::T2Large),
"t2x.large" => Ok(AwsInstancesType::T2Xlarge),
@@ -64,7 +65,10 @@ impl FromStr for AwsInstancesType {
"t3.xlarge" => Ok(AwsInstancesType::T3Xlarge),
"t3a.large" => Ok(AwsInstancesType::T3aLarge),
"t3a.2xlarge" => Ok(AwsInstancesType::T3a2xlarge),
_ => Err(()),
_ => {
let message = format!("`{}` instance type is not supported", s);
return Err(CommandError::new(message.clone(), Some(message)));
}
}
}
}

View File

@@ -1,5 +1,5 @@
use self::rusoto_iam::{CreateServiceLinkedRoleRequest, GetRoleRequest, Iam, IamClient};
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use rusoto_core::{Client, HttpClient, Region};
use rusoto_credential::StaticProvider;
use tokio::runtime::Runtime;
@@ -13,13 +13,12 @@ pub struct Role {
}
pub fn get_default_roles_to_create() -> Vec<Role> {
vec![Role {
role_name: "AWSServiceRoleForAmazonElasticsearchService".to_string(),
service_name: "es.amazonaws.com".to_string(),
description:
"role permissions policy allows Amazon ES to complete create, delete, describe, modify on ec2 and elb"
.to_string(),
}]
vec![Role::new(
"AWSServiceRoleForAmazonElasticsearchService".to_string(),
"es.amazonaws.com".to_string(),
"role permissions policy allows Amazon ES to complete create, delete, describe, modify on ec2 and elb"
.to_string(),
)]
}
impl Role {
@@ -31,7 +30,7 @@ impl Role {
}
}
pub async fn is_exist(&self, access_key: &str, secret_key: &str) -> Result<bool, SimpleError> {
pub async fn is_exist(&self, access_key: &str, secret_key: &str) -> Result<bool, CommandError> {
let credentials = StaticProvider::new(access_key.to_string(), secret_key.to_string(), None, None);
let client = Client::new_with(credentials, HttpClient::new().unwrap());
let iam_client = IamClient::new_with_client(client, Region::UsEast1);
@@ -40,33 +39,33 @@ impl Role {
role_name: self.role_name.clone(),
})
.await;
return match role {
match role {
Ok(_) => Ok(true),
Err(e) => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!(
"Unable to know if {} exist on AWS Account: {:?}",
&self.role_name, e
)),
Err(e) => Err(CommandError::new(
format!("Unable to know if {} exist on AWS account: {:?}", &self.role_name, e),
Some(format!("Unable to know if {} exist on AWS account.", &self.role_name,)),
)),
};
}
}
pub fn create_service_linked_role(&self, access_key: &str, secret_key: &str) -> Result<bool, SimpleError> {
pub fn create_service_linked_role(&self, access_key: &str, secret_key: &str) -> Result<bool, CommandError> {
let future_is_exist = self.is_exist(access_key, secret_key);
let exist = Runtime::new()
.expect("Failed to create Tokio runtime to check if role exist")
.block_on(future_is_exist);
return match exist {
match exist {
Ok(true) => {
info!("Role {} already exist, nothing to do", &self.role_name);
// Role already exist, nothing to do
Ok(true)
}
_ => {
info!("Role {} doesn't exist, let's create it !", &self.role_name);
// Role doesn't exist, let's create it !
let credentials = StaticProvider::new(access_key.to_string(), secret_key.to_string(), None, None);
let client = Client::new_with(credentials, HttpClient::new().unwrap());
let iam_client = IamClient::new_with_client(client, Region::UsEast1);
let future_create = iam_client.create_service_linked_role(CreateServiceLinkedRoleRequest {
aws_service_name: self.service_name.clone(),
custom_suffix: None,
@@ -75,17 +74,18 @@ impl Role {
let created = Runtime::new()
.expect("Failed to create Tokio runtime to check if role exist")
.block_on(future_create);
return match created {
Ok(_) => Ok(true),
Err(e) => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!(
"Unable to know if {} exist on AWS Account: {:?}",
&self.role_name, e
)),
)),
Err(e) => {
let safe_message = format!("Unable to know if `{}` exist on AWS Account", &self.role_name);
return Err(CommandError::new(
format!("{}, error: {:?}", safe_message, e),
Some(safe_message),
));
}
};
}
};
}
}
}

View File

@@ -3,6 +3,7 @@ use crate::cloud_provider::aws::regions::RegionAndZoneErrors::*;
use crate::models::ToTerraformString;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::fmt::{Display, Formatter};
use std::str::FromStr;
use strum_macros::EnumIter;
@@ -290,6 +291,17 @@ pub enum RegionAndZoneErrors {
ZoneNotSupported,
}
impl Display for RegionAndZoneErrors {
    /// Renders a short, human-readable description for each error variant.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Pick the static description first, then emit it in one write.
        let description = match self {
            RegionNotFound => "Region not found",
            RegionNotSupported => "Region not supported",
            ZoneNotFound => "Zone not found",
            ZoneNotSupported => "Zone not supported",
        };
        write!(f, "{}", description)
    }
}
impl AwsZones {
pub fn to_string(&self) -> String {
match self {

View File

@@ -8,8 +8,9 @@ use crate::cloud_provider::service::{
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -295,6 +296,7 @@ impl Create for Router {
function_name!(),
self.name(),
);
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let kubernetes = target.kubernetes;
let environment = target.environment;
let workspace_dir = self.workspace_directory();
@@ -310,27 +312,32 @@ impl Create for Router {
let context = self.tera_context(target)?;
let from_dir = format!("{}/aws/charts/q-ingress-tls", self.context.lib_root_dir());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), &context),
)?;
if let Err(e) =
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context)
{
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
from_dir.to_string(),
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
),
)?;
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
@@ -406,13 +413,14 @@ impl Pause for Router {
impl Delete for Router {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, false)
delete_router(target, self, false, event_details)
}
fn on_delete_check(&self) -> Result<(), EngineError> {
@@ -421,12 +429,13 @@ impl Delete for Router {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, true)
delete_router(target, self, true, event_details)
}
}

View File

@@ -15,7 +15,8 @@ use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset};
use crate::error::EngineErrorCause::Internal;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::errors::CommandError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port};
use ::function_name::named;
use std::fmt;
@@ -370,6 +371,7 @@ impl Pause for Application {
impl Delete for Application {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -378,7 +380,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, false)
delete_stateless_service(target, self, false, event_details.clone())
})
}
@@ -388,6 +390,7 @@ impl Delete for Application {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -396,7 +399,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, true)
delete_stateless_service(target, self, true, event_details.clone())
})
}
}
@@ -474,9 +477,9 @@ impl fmt::Display for DoRegion {
}
impl FromStr for DoRegion {
type Err = ();
type Err = CommandError;
fn from_str(s: &str) -> Result<DoRegion, ()> {
fn from_str(s: &str) -> Result<DoRegion, CommandError> {
match s {
"nyc1" => Ok(DoRegion::NewYorkCity1),
"nyc2" => Ok(DoRegion::NewYorkCity2),
@@ -491,7 +494,12 @@ impl FromStr for DoRegion {
"fra1" => Ok(DoRegion::Frankfurt),
"tor1" => Ok(DoRegion::Toronto),
"blr1" => Ok(DoRegion::Bangalore),
_ => Err(()),
_ => {
return Err(CommandError::new_from_safe_message(format!(
"`{}` region is not supported",
s
)));
}
}
}
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -264,6 +264,7 @@ impl Terraform for MongoDB {
impl Create for MongoDB {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -272,7 +273,7 @@ impl Create for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -328,6 +329,7 @@ impl Pause for MongoDB {
impl Delete for MongoDB {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -336,7 +338,7 @@ impl Delete for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -264,6 +264,7 @@ impl Terraform for MySQL {
impl Create for MySQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -274,7 +275,7 @@ impl Create for MySQL {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Create,
Box::new(|| deploy_stateful_service(target, self)),
Box::new(|| deploy_stateful_service(target, self, event_details.clone())),
)
}
@@ -331,6 +332,7 @@ impl Pause for MySQL {
impl Delete for MySQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -341,7 +343,7 @@ impl Delete for MySQL {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Delete,
Box::new(|| delete_stateful_service(target, self)),
Box::new(|| delete_stateful_service(target, self, event_details.clone())),
)
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -266,6 +266,7 @@ impl Terraform for PostgreSQL {
impl Create for PostgreSQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -276,7 +277,7 @@ impl Create for PostgreSQL {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Create,
Box::new(|| deploy_stateful_service(target, self)),
Box::new(|| deploy_stateful_service(target, self, event_details.clone())),
)
}
@@ -332,6 +333,7 @@ impl Pause for PostgreSQL {
impl Delete for PostgreSQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -342,7 +344,7 @@ impl Delete for PostgreSQL {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Delete,
Box::new(|| delete_stateful_service(target, self)),
Box::new(|| delete_stateful_service(target, self, event_details.clone())),
)
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -263,6 +263,7 @@ impl Terraform for Redis {
impl Create for Redis {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -273,7 +274,7 @@ impl Create for Redis {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Create,
Box::new(|| deploy_stateful_service(target, self)),
Box::new(|| deploy_stateful_service(target, self, event_details.clone())),
)
}
@@ -328,6 +329,7 @@ impl Pause for Redis {
impl Delete for Redis {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -338,7 +340,7 @@ impl Delete for Redis {
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Pause,
Box::new(|| delete_stateful_service(target, self)),
Box::new(|| delete_stateful_service(target, self, event_details.clone())),
)
}

View File

@@ -2,7 +2,7 @@ use std::fmt;
use reqwest::StatusCode;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use crate::utilities::get_header_with_bearer;
pub const DIGITAL_OCEAN_API_URL: &str = "https://api.digitalocean.com";
@@ -28,25 +28,36 @@ impl DoApiType {
}
}
pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Result<String, SimpleError> {
pub fn do_get_from_api(token: &str, api_type: DoApiType, url_api: String) -> Result<String, CommandError> {
let headers = get_header_with_bearer(token);
let res = reqwest::blocking::Client::new().get(url_api).headers(headers).send();
match res {
Ok(response) => match response.status() {
StatusCode::OK => Ok(response.text().unwrap()),
StatusCode::UNAUTHORIZED => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!("could not get {} information, ensure your DigitalOcean token is valid. {:?}", api_type, response)),
)),
_ => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!("unknown status code received from Digital Ocean Kubernetes API while retrieving {} information. {:?}", api_type, response)),
)),
},
Err(_) => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!("unable to get a response from Digital Ocean {} API", api_type)),
)),
Ok(response) => {
match response.status() {
StatusCode::OK => Ok(response.text().expect("Cannot get response text")),
StatusCode::UNAUTHORIZED => {
let message_safe = format!(
"Could not get {} information, ensure your DigitalOcean token is valid.",
api_type
);
return Err(CommandError::new(
format!("{}, response: {:?}", message_safe.to_string(), response),
Some(message_safe.to_string()),
));
}
_ => {
let message_safe = format!("Unknown status code received from Digital Ocean Kubernetes API while retrieving {} information.", api_type);
return Err(CommandError::new(
format!("{}, response: {:?}", message_safe.to_string(), response),
Some(message_safe.to_string()),
));
}
}
}
Err(_) => Err(CommandError::new_from_safe_message(format!(
"Unable to get a response from Digital Ocean {} API",
api_type
))),
}
}

View File

@@ -2,13 +2,13 @@ use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiT
use crate::cloud_provider::digitalocean::models::doks::KubernetesCluster;
use crate::cloud_provider::digitalocean::models::doks::{DoksList, DoksOptions, KubernetesVersion};
use crate::cloud_provider::utilities::VersionsNumber;
use crate::error::{SimpleError, SimpleErrorKind, StringError};
use crate::errors::CommandError;
use std::str::FromStr;
pub fn get_doks_info_from_name(
json_content: &str,
cluster_name: String,
) -> Result<Option<KubernetesCluster>, SimpleError> {
) -> Result<Option<KubernetesCluster>, CommandError> {
let res_doks = serde_json::from_str::<DoksList>(json_content);
match res_doks {
@@ -22,53 +22,39 @@ pub fn get_doks_info_from_name(
}
}
if cluster_info.is_some() {
info!("cluster {} is present from DigitalOcean API", cluster_name);
} else {
info!("cluster {} is not present from DigitalOcean API", cluster_name)
}
Ok(cluster_info)
}
Err(e) => Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while trying to deserialize json received from Digital Ocean DOKS API. {}",
e
)),
}),
Err(e) => {
let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API";
return Err(CommandError::new(
format!("{}, error: {}", safe_message.to_string(), e.to_string()),
Some(safe_message.to_string()),
));
}
}
}
pub fn get_do_latest_doks_slug_from_api(token: &str, wished_version: &str) -> Result<Option<String>, SimpleError> {
pub fn get_do_latest_doks_slug_from_api(token: &str, wished_version: &str) -> Result<Option<String>, CommandError> {
let api_url = format!("{}/options", DoApiType::Doks.api_url());
let json_content = do_get_from_api(token, DoApiType::Doks, api_url)?;
let doks_versions = get_doks_versions_from_api_output(&json_content)?;
match get_do_kubernetes_latest_slug_version(&doks_versions, wished_version) {
Ok(x) => Ok(x),
Err(e) => Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"version {} is not supported by DigitalOcean. {}",
wished_version, e
)),
}),
}
get_do_kubernetes_latest_slug_version(&doks_versions, wished_version)
}
fn get_doks_versions_from_api_output(json_content: &str) -> Result<Vec<KubernetesVersion>, SimpleError> {
fn get_doks_versions_from_api_output(json_content: &str) -> Result<Vec<KubernetesVersion>, CommandError> {
let res_doks_options = serde_json::from_str::<DoksOptions>(json_content);
match res_doks_options {
Ok(options) => Ok(options.options.versions),
Err(e) => Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while trying to deserialize json received from Digital Ocean DOKS API. {}",
e
)),
}),
Err(e) => {
let safe_message = "Error while trying to deserialize json received from Digital Ocean DOKS API";
return Err(CommandError::new(
format!("{}, error: {}", safe_message.to_string(), e.to_string()),
Some(safe_message.to_string()),
));
}
}
}
@@ -76,18 +62,24 @@ fn get_doks_versions_from_api_output(json_content: &str) -> Result<Vec<Kubernete
fn get_do_kubernetes_latest_slug_version(
doks_versions: &Vec<KubernetesVersion>,
wished_version: &str,
) -> Result<Option<String>, StringError> {
let wished_k8s_version = VersionsNumber::from_str(wished_version)?;
) -> Result<Option<String>, CommandError> {
let wished_k8s_version =
VersionsNumber::from_str(wished_version).map_err(|e| CommandError::new_from_safe_message(e.to_string()))?;
for kubernetes_doks_version in doks_versions {
let current_k8s_version = VersionsNumber::from_str(kubernetes_doks_version.kubernetes_version.as_str())?;
let current_k8s_version = VersionsNumber::from_str(kubernetes_doks_version.kubernetes_version.as_str())
.map_err(|e| CommandError::new_from_safe_message(e.to_string()))?;
if current_k8s_version.major == wished_k8s_version.major
&& current_k8s_version.minor == wished_k8s_version.minor
{
return Ok(Some(kubernetes_doks_version.slug.clone()));
}
}
Ok(None)
Err(CommandError::new_from_safe_message(format!(
"DOKS version `{}` is not supported.",
wished_k8s_version.to_string()
)))
}
#[cfg(test)]
@@ -373,20 +365,19 @@ mod tests_doks {
let doks_versions = get_doks_versions_from_api_output(json_content.as_str()).unwrap();
// not supported anymore version
assert!(get_do_kubernetes_latest_slug_version(&doks_versions, "1.18")
.unwrap()
.is_none());
assert!(get_do_kubernetes_latest_slug_version(&doks_versions, "1.18").is_err());
// supported versions
assert_eq!(
get_do_kubernetes_latest_slug_version(&doks_versions, "1.19")
.unwrap()
.unwrap(),
.expect("error getting do kubernetes version 1.19")
.expect("error, version 1.19 is none"),
"1.19.12-do.0".to_string()
);
assert_eq!(
get_do_kubernetes_latest_slug_version(&doks_versions, "1.21")
.unwrap()
.unwrap(),
.expect("error getting do kubernetes version 1.21")
.expect("error, version 1.21 is none"),
"1.21.2-do.2".to_string()
);
}

View File

@@ -5,12 +5,11 @@ use crate::cloud_provider::helm::{
ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DigitalOceanQoveryTerraformConfig {
@@ -118,16 +117,16 @@ pub fn do_helm_charts(
qovery_terraform_config_file: &str,
chart_config_prerequisites: &ChartsConfigPrerequisites,
chart_prefix_path: Option<&str>,
_kubernetes_config: &Path,
_envs: &[(String, String)],
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
info!("preparing chart configuration to be deployed");
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, CommandError> {
let content_file = match File::open(&qovery_terraform_config_file) {
Ok(x) => x,
Err(e) => return Err(SimpleError{ kind: SimpleErrorKind::Other, message: Some(
format!("Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?. {:?}", e)
)}),
Err(e) => {
let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?";
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
let chart_prefix = chart_prefix_path.unwrap_or("./");
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
@@ -135,14 +134,14 @@ pub fn do_helm_charts(
let qovery_terraform_config: DigitalOceanQoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
error!(
"error while parsing terraform config file {}: {:?}",
&qovery_terraform_config_file, &e
let message_safe = format!(
"Error while parsing terraform config file {}",
qovery_terraform_config_file
);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("{:?}", e)),
});
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
@@ -806,22 +805,13 @@ datasources:
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let qovery_agent_version: QoveryAgent = match get_qovery_app_version(
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,
&chart_config_prerequisites.infra_options.agent_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery agent version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let mut qovery_agent = CommonChart {
chart_info: ChartInfo {
name: "qovery-agent".to_string(),
@@ -888,22 +878,13 @@ datasources:
})
}
let qovery_engine_version: QoveryEngine = match get_qovery_app_version(
let qovery_engine_version: QoveryEngine = get_qovery_app_version(
QoveryAppName::Engine,
&chart_config_prerequisites.infra_options.engine_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery engine version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let qovery_engine = CommonChart {
chart_info: ChartInfo {
name: "qovery-engine".to_string(),

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,5 @@
use crate::cloud_provider::kubernetes::InstanceType;
use crate::errors::CommandError;
use core::fmt;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
@@ -58,9 +59,9 @@ impl fmt::Display for DoInstancesType {
}
impl FromStr for DoInstancesType {
type Err = ();
type Err = CommandError;
fn from_str(s: &str) -> Result<DoInstancesType, ()> {
fn from_str(s: &str) -> Result<DoInstancesType, CommandError> {
match s {
"s-1vcpu-1gb" => Ok(DoInstancesType::S1vcpu1gb),
"s-1vcpu-2gb" => Ok(DoInstancesType::S1vcpu2gb),
@@ -69,7 +70,10 @@ impl FromStr for DoInstancesType {
"s-4vcpu-8gb" => Ok(DoInstancesType::S4vcpu8gb),
"s-6vcpu-16gb" => Ok(DoInstancesType::S6vcpu16gb),
"s-8vcpu-32gb" => Ok(DoInstancesType::S8vcpu32gb),
_ => Err(()),
_ => {
let message = format!("`{}` instance type is not supported", s);
return Err(CommandError::new(message.clone(), Some(message)));
}
}
}
}

View File

@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use crate::cloud_provider::digitalocean::application::DoRegion;
use crate::cloud_provider::digitalocean::do_api_common::{do_get_from_api, DoApiType};
use crate::cloud_provider::digitalocean::models::vpc::{Vpc, Vpcs};
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
@@ -31,7 +31,7 @@ pub fn get_do_subnet_available_from_api(
token: &str,
desired_subnet: String,
region: DoRegion,
) -> Result<Option<Vpc>, SimpleError> {
) -> Result<Option<Vpc>, CommandError> {
// get subnets from the API
let vpcs = match do_get_from_api(token, DoApiType::Vpc, DoApiType::Vpc.api_url()) {
Ok(x) => do_get_vpcs_from_api_output(x.as_str())?,
@@ -42,7 +42,7 @@ pub fn get_do_subnet_available_from_api(
get_do_vpc_from_subnet(desired_subnet, vpcs, region)
}
pub fn get_do_vpc_name_available_from_api(token: &str, desired_name: String) -> Result<Option<Vpc>, SimpleError> {
pub fn get_do_vpc_name_available_from_api(token: &str, desired_name: String) -> Result<Option<Vpc>, CommandError> {
// get names from the API
let vpcs = match do_get_from_api(token, DoApiType::Vpc, DoApiType::Vpc.api_url()) {
Ok(x) => do_get_vpcs_from_api_output(x.as_str())?,
@@ -53,13 +53,13 @@ pub fn get_do_vpc_name_available_from_api(token: &str, desired_name: String) ->
Ok(get_do_vpc_from_name(desired_name, vpcs))
}
pub fn get_do_random_available_subnet_from_api(token: &str, region: DoRegion) -> Result<String, SimpleError> {
pub fn get_do_random_available_subnet_from_api(token: &str, region: DoRegion) -> Result<String, CommandError> {
let json_content = do_get_from_api(token, DoApiType::Vpc, DoApiType::Vpc.api_url())?;
let existing_vpcs = do_get_vpcs_from_api_output(&json_content)?;
get_random_available_subnet(existing_vpcs, region)
}
fn get_random_available_subnet(existing_vpcs: Vec<Vpc>, region: DoRegion) -> Result<String, SimpleError> {
fn get_random_available_subnet(existing_vpcs: Vec<Vpc>, region: DoRegion) -> Result<String, CommandError> {
let subnet_start = 0;
let subnet_end = 254;
@@ -78,10 +78,9 @@ fn get_random_available_subnet(existing_vpcs: Vec<Vpc>, region: DoRegion) -> Res
}
}
Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("no available subnet found on this Digital Ocean account.".to_string()),
})
Err(CommandError::new_from_safe_message(
"no available subnet found on this Digital Ocean account.".to_string(),
))
}
fn get_do_vpc_from_name(desired_name: String, existing_vpcs: Vec<Vpc>) -> Option<Vpc> {
@@ -101,17 +100,14 @@ fn get_do_vpc_from_subnet(
desired_subnet: String,
existing_vpcs: Vec<Vpc>,
region: DoRegion,
) -> Result<Option<Vpc>, SimpleError> {
) -> Result<Option<Vpc>, CommandError> {
let mut exists = None;
match is_do_reserved_vpc_subnets(region, desired_subnet.as_str()) {
true => Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"subnet {} can't be used because it's a DigitalOcean dedicated subnet",
desired_subnet
)),
}),
true => Err(CommandError::new_from_safe_message(format!(
"subnet {} can't be used because it's a DigitalOcean dedicated subnet",
desired_subnet
))),
false => {
for vpc in existing_vpcs {
if vpc.ip_range == desired_subnet {
@@ -124,19 +120,19 @@ fn get_do_vpc_from_subnet(
}
}
fn do_get_vpcs_from_api_output(json_content: &str) -> Result<Vec<Vpc>, SimpleError> {
fn do_get_vpcs_from_api_output(json_content: &str) -> Result<Vec<Vpc>, CommandError> {
// better to use lib when VPC will be supported https://github.com/LoganDark/digitalocean/issues/3
let res_vpcs = serde_json::from_str::<Vpcs>(json_content);
match res_vpcs {
Ok(vpcs) => Ok(vpcs.vpcs),
Err(e) => Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format!(
"error while trying to deserialize json received from Digital Ocean VPC API. {}",
e
)),
)),
Err(e) => {
let message_safe = "Error while trying to deserialize json received from Digital Ocean VPC API";
Err(CommandError::new(
format!("{}, error: {}", message_safe.to_string(), e),
Some(message_safe.to_string()),
))
}
}
}

View File

@@ -8,8 +8,9 @@ use crate::cloud_provider::service::{
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -314,6 +315,7 @@ impl Create for Router {
function_name!(),
self.name(),
);
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let kubernetes = target.kubernetes;
let environment = target.environment;
@@ -330,27 +332,32 @@ impl Create for Router {
let context = self.tera_context(target)?;
let from_dir = format!("{}/digitalocean/charts/q-ingress-tls", self.context.lib_root_dir());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), &context),
)?;
if let Err(e) =
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context)
{
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
from_dir.to_string(),
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
),
)?;
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
@@ -434,13 +441,14 @@ impl Pause for Router {
impl Delete for Router {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, false)
delete_router(target, self, false, event_details)
}
fn on_delete_check(&self) -> Result<(), EngineError> {
@@ -449,12 +457,13 @@ impl Delete for Router {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, true)
delete_router(target, self, true, event_details)
}
}

View File

@@ -10,7 +10,7 @@ use crate::cmd::kubectl::{
kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output,
};
use crate::cmd::structs::HelmHistoryRow;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use crate::utilities::calculate_hash;
use semver::Version;
use std::collections::HashMap;
@@ -143,20 +143,18 @@ impl Default for ChartInfo {
}
pub trait HelmChart: Send {
fn check_prerequisites(&self) -> Result<Option<ChartPayload>, SimpleError> {
fn check_prerequisites(&self) -> Result<Option<ChartPayload>, CommandError> {
let chart = self.get_chart_info();
for file in chart.values_files.iter() {
match fs::metadata(file) {
Ok(_) => {}
Err(e) => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"Can't access helm chart override file {} for chart {}. {:?}",
file, chart.name, e
)),
})
}
if let Err(e) = fs::metadata(file) {
let safe_message = format!(
"Can't access helm chart override file `{}` for chart `{}`",
file, chart.name,
);
return Err(CommandError::new(
format!("{}, error: {:?}", safe_message, e),
Some(safe_message),
));
}
}
Ok(None)
@@ -178,7 +176,7 @@ pub trait HelmChart: Send {
kubernetes_config: &Path,
envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let chart_infos = self.get_chart_info();
// Cleaning any existing crash looping pod for this helm chart
@@ -194,17 +192,14 @@ pub trait HelmChart: Send {
Ok(payload)
}
fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<Option<ChartPayload>, SimpleError> {
fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<Option<ChartPayload>, CommandError> {
info!("prepare and deploy chart {}", &self.get_chart_info().name);
let payload = self.check_prerequisites()?;
let payload = self.pre_exec(&kubernetes_config, &envs, payload)?;
let payload = match self.exec(&kubernetes_config, &envs, payload.clone()) {
Ok(payload) => payload,
Err(e) => {
error!(
"Error while deploying chart: {:?}",
e.message.clone().expect("no error message provided")
);
error!("Error while deploying chart: {}", e.message());
self.on_deploy_failure(&kubernetes_config, &envs, payload)?;
return Err(e);
}
@@ -218,7 +213,7 @@ pub trait HelmChart: Send {
kubernetes_config: &Path,
envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
let chart_info = self.get_chart_info();
match chart_info.action {
@@ -230,7 +225,7 @@ pub trait HelmChart: Send {
) {
warn!(
"error while trying to destroy chart if breaking change is detected: {:?}",
e.message
e.message()
);
}
@@ -262,7 +257,7 @@ pub trait HelmChart: Send {
_kubernetes_config: &Path,
_envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
Ok(payload)
}
@@ -271,7 +266,7 @@ pub trait HelmChart: Send {
kubernetes_config: &Path,
envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
// print events for future investigation
let environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
match kubectl_exec_get_events(
@@ -293,7 +288,7 @@ fn deploy_parallel_charts(
kubernetes_config: &Path,
envs: &[(String, String)],
charts: Vec<Box<dyn HelmChart>>,
) -> Result<(), SimpleError> {
) -> Result<(), CommandError> {
let mut handles = vec![];
for chart in charts.into_iter() {
@@ -310,16 +305,17 @@ fn deploy_parallel_charts(
for handle in handles {
match handle.join() {
Ok(helm_run_ret) => match helm_run_ret {
Ok(_) => {}
Err(e) => return Err(e),
},
Ok(helm_run_ret) => {
if let Err(e) = helm_run_ret {
return Err(e);
}
}
Err(e) => {
error!("{:?}", e);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("thread panicked during parallel charts deployments".to_string()),
});
let safe_message = "Thread panicked during parallel charts deployments.";
return Err(CommandError::new(
format!("{}, error: {:?}", safe_message.to_string(), e),
Some(safe_message.to_string()),
));
}
}
}
@@ -332,7 +328,7 @@ pub fn deploy_charts_levels(
envs: &Vec<(String, String)>,
charts: Vec<Vec<Box<dyn HelmChart>>>,
dry_run: bool,
) -> Result<(), SimpleError> {
) -> Result<(), CommandError> {
// first show diff
for level in &charts {
for chart in level {
@@ -352,9 +348,8 @@ pub fn deploy_charts_levels(
return Ok(());
}
for level in charts.into_iter() {
match deploy_parallel_charts(&kubernetes_config, &envs, level) {
Ok(_) => {}
Err(e) => return Err(e),
if let Err(e) = deploy_parallel_charts(&kubernetes_config, &envs, level) {
return Err(e);
}
}
@@ -401,7 +396,7 @@ impl HelmChart for CoreDNSConfigChart {
kubernetes_config: &Path,
envs: &[(String, String)],
_payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let kind = "configmap";
let mut environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
environment_variables.push(("KUBECONFIG", kubernetes_config.to_str().unwrap()));
@@ -427,10 +422,9 @@ impl HelmChart for CoreDNSConfigChart {
) {
Ok(cm) => {
if cm.data.corefile.is_none() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("Corefile data structure is not found in CoreDNS configmap".to_string()),
});
return Err(CommandError::new_from_safe_message(
"Corefile data structure is not found in CoreDNS configmap".to_string(),
));
};
calculate_hash(&cm.data.corefile.unwrap())
}
@@ -442,7 +436,7 @@ impl HelmChart for CoreDNSConfigChart {
// set labels and annotations to give helm ownership
info!("setting annotations and labels on {}/{}", &kind, &self.chart_info.name);
let steps = || -> Result<(), SimpleError> {
let steps = || -> Result<(), CommandError> {
kubectl_exec_with_output(
vec![
"-n",
@@ -494,28 +488,22 @@ impl HelmChart for CoreDNSConfigChart {
Ok(Some(payload))
}
fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<Option<ChartPayload>, SimpleError> {
fn run(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<Option<ChartPayload>, CommandError> {
info!("prepare and deploy chart {}", &self.get_chart_info().name);
self.check_prerequisites()?;
let payload = match self.pre_exec(&kubernetes_config, &envs, None) {
Ok(p) => match p {
None => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(
"CoreDNS configmap checksum couldn't be get, can't deploy CoreDNS chart".to_string(),
),
})
return Err(CommandError::new_from_safe_message(
"CoreDNS configmap checksum couldn't be get, can't deploy CoreDNS chart".to_string(),
))
}
Some(p) => p,
},
Err(e) => return Err(e),
};
if let Err(e) = self.exec(&kubernetes_config, &envs, None) {
error!(
"Error while deploying chart: {:?}",
e.message.clone().expect("no message provided")
);
error!("Error while deploying chart: {:?}", e.message());
self.on_deploy_failure(&kubernetes_config, &envs, None)?;
return Err(e);
};
@@ -528,7 +516,7 @@ impl HelmChart for CoreDNSConfigChart {
kubernetes_config: &Path,
envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let mut environment_variables = Vec::new();
for env in envs {
environment_variables.push((env.0.as_str(), env.1.as_str()));
@@ -537,17 +525,15 @@ impl HelmChart for CoreDNSConfigChart {
// detect configmap data change
let previous_configmap_checksum = match &payload {
None => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("missing payload, can't check coredns update".to_string()),
})
return Err(CommandError::new_from_safe_message(
"Missing payload, can't check coredns update".to_string(),
))
}
Some(x) => match x.data.get("checksum") {
None => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("missing configmap checksum, can't check coredns diff".to_string()),
})
return Err(CommandError::new_from_safe_message(
"Missing configmap checksum, can't check coredns diff".to_string(),
))
}
Some(c) => c.clone(),
},
@@ -560,10 +546,9 @@ impl HelmChart for CoreDNSConfigChart {
) {
Ok(cm) => {
if cm.data.corefile.is_none() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("Corefile data structure is not found in CoreDNS configmap".to_string()),
});
return Err(CommandError::new_from_safe_message(
"Corefile data structure is not found in CoreDNS configmap".to_string(),
));
};
calculate_hash(&cm.data.corefile.unwrap()).to_string()
}
@@ -603,7 +588,7 @@ impl HelmChart for PrometheusOperatorConfigChart {
kubernetes_config: &Path,
envs: &[(String, String)],
payload: Option<ChartPayload>,
) -> Result<Option<ChartPayload>, SimpleError> {
) -> Result<Option<ChartPayload>, CommandError> {
let environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
let chart_info = self.get_chart_info();
match chart_info.action {
@@ -614,8 +599,8 @@ impl HelmChart for PrometheusOperatorConfigChart {
chart_info,
) {
warn!(
"error while trying to destroy chart if breaking change is detected: {:?}",
e.message
"error while trying to destroy chart if breaking change is detected: {}",
e.message()
);
}
@@ -654,105 +639,7 @@ impl HelmChart for PrometheusOperatorConfigChart {
}
}
// Qovery Portal
// #[derive(Default)]
// pub struct QoveryPortalChart {
// pub chart_info: ChartInfo,
// }
//
// impl HelmChart for QoveryPortalChart {
// fn get_chart_info(&self) -> &ChartInfo {
// &self.chart_info
// }
//
// fn pre_exec(
// &self,
// kubernetes_config: &Path,
// envs: &[(String, String)],
// _payload: Option<ChartPayload>,
// ) -> Result<Option<ChartPayload>, SimpleError> {
// let mut environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
// let cluster_default_ingress_loadbalancer_address = match kubectl_exec_get_external_ingress_hostname(
// &kubernetes_config,
// &get_chart_namespace(HelmChartNamespaces::NginxIngress), // todo: would be better to get it directly from the chart itself
// "app=nginx-ingress,component=controller",
// environment_variables,
// ) {
// Ok(x) => {
// if x.is_some() {
// x.unwrap()
// } else {
// return Err(SimpleError {
// kind: SimpleErrorKind::Other,
// message: Some(format!(
// "No default Nginx ingress was found, can't deploy Qovery portal. {:?}",
// e.message
// )),
// });
// }
// }
// Err(e) => {
// return Err(SimpleError {
// kind: SimpleErrorKind::Other,
// message: Some(format!(
// "Error while trying to get default Nginx ingress to deploy Qovery portal. {:?}",
// e.message
// )),
// })
// }
// };
// let mut configmap_hash = HashMap::new();
// configmap_hash.insert(
// "loadbalancer_address".to_string(),
// cluster_default_ingress_loadbalancer_address,
// );
// let payload = ChartPayload { data: configmap_hash };
//
// Ok(Some(payload))
// }
//
// fn exec(
// &self,
// kubernetes_config: &Path,
// envs: &[(String, String)],
// payload: Option<ChartPayload>,
// ) -> Result<Option<ChartPayload>, SimpleError> {
// if payload.is_none() {
// return Err(SimpleError {
// kind: SimpleErrorKind::Other,
// message: Some("payload is missing for qovery-portal chart".to_string()),
// });
// }
// let external_dns_target = match payload.unwrap().data.get("loadbalancer_address") {
// None => {
// return Err(SimpleError {
// kind: SimpleErrorKind::Other,
// message: Some("loadbalancer_address payload is missing, can't deploy qovery portal".to_string()),
// })
// }
// Some(x) => x.into_string(),
// };
//
// let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
// let mut chart = self.chart_info.clone();
// chart.values.push(ChartSetValue {
// key: "externalDnsTarget".to_string(),
// value: external_dns_target,
// });
//
// match self.get_chart_info().action {
// HelmAction::Deploy => helm_exec_upgrade_with_chart_info(kubernetes_config, &environment_variables, &chart)?,
// HelmAction::Destroy => {
// helm_exec_uninstall_with_chart_info(kubernetes_config, &environment_variables, &chart)?
// }
// HelmAction::Skip => {}
// }
// Ok(payload)
// }
// }
pub fn get_latest_successful_deployment(helm_history_list: &[HelmHistoryRow]) -> Result<HelmHistoryRow, SimpleError> {
pub fn get_latest_successful_deployment(helm_history_list: &[HelmHistoryRow]) -> Result<HelmHistoryRow, CommandError> {
let mut helm_history_reversed = helm_history_list.to_owned();
helm_history_reversed.reverse();
@@ -762,13 +649,10 @@ pub fn get_latest_successful_deployment(helm_history_list: &[HelmHistoryRow]) ->
}
}
Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"no succeed revision found for chart {}",
helm_history_reversed[0].chart
)),
})
Err(CommandError::new_from_safe_message(format!(
"No succeed revision found for chart `{}`",
helm_history_reversed[0].chart
)))
}
pub fn get_engine_helm_action_from_location(location: &EngineLocation) -> HelmAction {
@@ -791,23 +675,13 @@ pub struct ShellAgentContext<'a> {
pub fn get_chart_for_shell_agent(
context: ShellAgentContext,
chart_path: impl Fn(&str) -> String,
) -> Result<CommonChart, SimpleError> {
let shell_agent_version: QoveryShellAgent = match get_qovery_app_version(
) -> Result<CommonChart, CommandError> {
let shell_agent_version: QoveryShellAgent = get_qovery_app_version(
QoveryAppName::ShellAgent,
context.api_token,
context.api_url,
context.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery shell agent version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let shell_agent = CommonChart {
chart_info: ChartInfo {
name: "shell-agent".to_string(),

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,5 @@
use reqwest::{header, Error};
use crate::errors::CommandError;
use reqwest::header;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
@@ -40,7 +41,7 @@ pub fn get_qovery_app_version<T: DeserializeOwned>(
token: &str,
api_fqdn: &str,
cluster_id: &str,
) -> Result<T, Error> {
) -> Result<T, CommandError> {
let mut headers = header::HeaderMap::new();
headers.insert("Content-Type", "application/json".parse().unwrap());
headers.insert("X-Qovery-Signature", token.parse().unwrap());
@@ -56,11 +57,19 @@ pub fn get_qovery_app_version<T: DeserializeOwned>(
api_fqdn, app_type, cluster_id
);
let message_safe = format!("Error while trying to get `{}` version.", app_type);
match reqwest::blocking::Client::new().get(&url).headers(headers).send() {
Ok(x) => match x.json::<T>() {
Ok(qa) => Ok(qa),
Err(e) => Err(e),
Err(e) => Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
)),
},
Err(e) => Err(e),
Err(e) => Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
)),
}
}

View File

@@ -18,7 +18,8 @@ use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset};
use crate::error::EngineErrorCause::Internal;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::errors::CommandError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper, Port};
use ::function_name::named;
@@ -388,6 +389,7 @@ impl Pause for Application {
impl Delete for Application {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -396,7 +398,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, false)
delete_stateless_service(target, self, false, event_details.clone())
})
}
@@ -406,6 +408,7 @@ impl Delete for Application {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -414,7 +417,7 @@ impl Delete for Application {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, self, true)
delete_stateless_service(target, self, true, event_details.clone())
})
}
}
@@ -535,16 +538,21 @@ impl fmt::Display for ScwZone {
}
impl FromStr for ScwZone {
type Err = ();
type Err = CommandError;
fn from_str(s: &str) -> Result<ScwZone, ()> {
fn from_str(s: &str) -> Result<ScwZone, CommandError> {
match s {
"fr-par-1" => Ok(ScwZone::Paris1),
"fr-par-2" => Ok(ScwZone::Paris2),
"fr-par-3" => Ok(ScwZone::Paris3),
"nl-ams-1" => Ok(ScwZone::Amsterdam1),
"pl-waw-1" => Ok(ScwZone::Warsaw1),
_ => Err(()),
_ => {
return Err(CommandError::new_from_safe_message(format!(
"`{}` zone is not supported",
s
)));
}
}
}
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -265,6 +265,7 @@ impl Terraform for MongoDB {
impl Create for MongoDB {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -273,7 +274,7 @@ impl Create for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -328,6 +329,7 @@ impl Pause for MongoDB {
impl Delete for MongoDB {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -336,7 +338,7 @@ impl Delete for MongoDB {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -12,7 +12,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -302,6 +302,7 @@ impl Terraform for MySQL {
impl Create for MySQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -310,7 +311,7 @@ impl Create for MySQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -366,6 +367,7 @@ impl Pause for MySQL {
impl Delete for MySQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -374,7 +376,7 @@ impl Delete for MySQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -12,7 +12,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope, StringError};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -311,6 +311,7 @@ impl Terraform for PostgreSQL {
impl Create for PostgreSQL {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -319,7 +320,7 @@ impl Create for PostgreSQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -375,6 +376,7 @@ impl Pause for PostgreSQL {
impl Delete for PostgreSQL {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -383,7 +385,7 @@ impl Delete for PostgreSQL {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -10,7 +10,7 @@ use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl;
use crate::error::{EngineError, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::DatabaseMode::MANAGED;
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -264,6 +264,7 @@ impl Terraform for Redis {
impl Create for Redis {
#[named]
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -272,7 +273,7 @@ impl Create for Redis {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Create, || {
deploy_stateful_service(target, self)
deploy_stateful_service(target, self, event_details.clone())
})
}
@@ -326,6 +327,7 @@ impl Pause for Redis {
impl Delete for Redis {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
@@ -334,7 +336,7 @@ impl Delete for Redis {
);
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Delete, || {
delete_stateful_service(target, self)
delete_stateful_service(target, self, event_details.clone())
})
}

View File

@@ -6,7 +6,7 @@ use crate::cloud_provider::helm::{
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName, QoveryEngine};
use crate::cloud_provider::scaleway::application::{ScwRegion, ScwZone};
use crate::cloud_provider::scaleway::kubernetes::KapsuleOptions;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fs::File;
@@ -110,17 +110,18 @@ pub fn scw_helm_charts(
chart_prefix_path: Option<&str>,
_kubernetes_config: &Path,
_envs: &[(String, String)],
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, CommandError> {
info!("preparing chart configuration to be deployed");
let content_file = match File::open(&qovery_terraform_config_file) {
Ok(x) => x,
Err(e) => return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(
format!("Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?. {:?}", e)
)
}),
Err(e) => {
let message_safe = "Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?";
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
let chart_prefix = chart_prefix_path.unwrap_or("./");
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
@@ -128,14 +129,14 @@ pub fn scw_helm_charts(
let qovery_terraform_config: ScalewayQoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
error!(
"error while parsing terraform config file {}: {:?}",
&qovery_terraform_config_file, &e
let message_safe = format!(
"Error while parsing terraform config file {}",
qovery_terraform_config_file
);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("{:?}", e)),
});
return Err(CommandError::new(
format!("{}, error: {:?}", message_safe.to_string(), e),
Some(message_safe.to_string()),
));
}
};
@@ -677,22 +678,13 @@ datasources:
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let qovery_agent_version: QoveryAgent = match get_qovery_app_version(
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,
&chart_config_prerequisites.infra_options.agent_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery agent version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let mut qovery_agent = CommonChart {
chart_info: ChartInfo {
name: "qovery-agent".to_string(),
@@ -758,22 +750,13 @@ datasources:
})
}
let qovery_engine_version: QoveryEngine = match get_qovery_app_version(
let qovery_engine_version: QoveryEngine = get_qovery_app_version(
QoveryAppName::Engine,
&chart_config_prerequisites.infra_options.engine_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
) {
Ok(x) => x,
Err(e) => {
let msg = format!("Qovery engine version couldn't be retrieved. {}", e);
error!("{}", &msg);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(msg),
});
}
};
)?;
let qovery_engine = CommonChart {
chart_info: ChartInfo {
name: "qovery-engine".to_string(),

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
use crate::cloud_provider::kubernetes::InstanceType;
use crate::errors::SimpleError;
use crate::errors::CommandError;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::str::FromStr;
@@ -67,9 +67,9 @@ impl fmt::Display for ScwInstancesType {
}
impl FromStr for ScwInstancesType {
type Err = SimpleError;
type Err = CommandError;
fn from_str(s: &str) -> Result<ScwInstancesType, SimpleError> {
fn from_str(s: &str) -> Result<ScwInstancesType, CommandError> {
match s {
"gp1-xs" => Ok(ScwInstancesType::Gp1Xs),
"gp1-s" => Ok(ScwInstancesType::Gp1S),
@@ -82,7 +82,7 @@ impl FromStr for ScwInstancesType {
"render-s" => Ok(ScwInstancesType::RenderS),
_ => {
let message = format!("`{}` instance type is not supported", s);
return Err(SimpleError::new(message.clone(), message));
return Err(CommandError::new(message.clone(), Some(message)));
}
}
}

View File

@@ -8,8 +8,9 @@ use crate::cloud_provider::service::{
use crate::cloud_provider::utilities::{check_cname_for, print_action, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope};
use crate::events::{ToTransmitter, Transmitter};
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::errors::EngineError as NewEngineError;
use crate::events::{EnvironmentStep, Stage, ToTransmitter, Transmitter};
use crate::models::{Context, Listen, Listener, Listeners};
use ::function_name::named;
@@ -262,6 +263,7 @@ impl Create for Router {
function_name!(),
self.name(),
);
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let kubernetes = target.kubernetes;
let environment = target.environment;
@@ -278,27 +280,32 @@ impl Create for Router {
let context = self.tera_context(target)?;
let from_dir = format!("{}/scaleway/charts/q-ingress-tls", self.context.lib_root_dir());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), &context),
)?;
if let Err(e) =
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), context)
{
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
from_dir.to_string(),
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
),
)?;
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
self.selector(),
workspace_dir.as_str(),
self.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
self.service_type(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
@@ -380,13 +387,14 @@ impl Pause for Router {
impl Delete for Router {
#[named]
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, false)
delete_router(target, self, false, event_details)
}
fn on_delete_check(&self) -> Result<(), EngineError> {
@@ -395,12 +403,13 @@ impl Delete for Router {
#[named]
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
let event_details = self.get_event_details(Stage::Environment(EnvironmentStep::Delete));
print_action(
self.cloud_provider_name(),
self.struct_name(),
function_name!(),
self.name(),
);
delete_router(target, self, true)
delete_router(target, self, true, event_details)
}
}

View File

@@ -15,9 +15,10 @@ use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::Statefulset;
use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_scale_replicas_by_selector, ScalingKind};
use crate::cmd::structs::LabelsContent;
use crate::error::{cast_simple_error_to_engine_error, StringError};
use crate::error::StringError;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::events::{EventDetails, Stage, ToTransmitter};
use crate::errors::{CommandError, EngineError as NewEngineError};
use crate::events::{EnvironmentStep, EventDetails, GeneralStep, Stage, ToTransmitter};
use crate::models::ProgressLevel::Info;
use crate::models::{
Context, DatabaseMode, Listen, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
@@ -85,8 +86,8 @@ pub trait Service: ToTransmitter {
fn tera_context(&self, target: &DeploymentTarget) -> Result<TeraContext, EngineError>;
// used to retrieve logs by using Kubernetes labels (selector)
fn selector(&self) -> Option<String>;
fn debug_logs(&self, deployment_target: &DeploymentTarget) -> Vec<String> {
debug_logs(self, deployment_target)
fn debug_logs(&self, deployment_target: &DeploymentTarget, event_details: EventDetails) -> Vec<String> {
debug_logs(self, deployment_target, event_details)
}
fn is_listening(&self, ip: &str) -> bool {
let private_port = match self.private_port() {
@@ -110,14 +111,11 @@ pub trait Service: ToTransmitter {
for binary in binaries.iter() {
if !crate::cmd::utilities::does_binary_exist(binary) {
let err = format!("{} binary not found", binary);
return Err(EngineError::new(
EngineErrorCause::Internal,
EngineErrorScope::Engine,
self.id(),
Some(err),
));
return Err(NewEngineError::new_missing_required_binary(
self.get_event_details(Stage::General(GeneralStep::ValidateSystemRequirements)),
binary.to_string(),
)
.to_legacy_engine_error());
}
}
@@ -290,13 +288,13 @@ impl<'a> ToString for ServiceType<'a> {
}
}
pub fn debug_logs<T>(service: &T, deployment_target: &DeploymentTarget) -> Vec<String>
pub fn debug_logs<T>(service: &T, deployment_target: &DeploymentTarget, event_details: EventDetails) -> Vec<String>
where
T: Service + ?Sized,
{
let kubernetes = deployment_target.kubernetes;
let environment = deployment_target.environment;
match get_stateless_resource_information_for_user(kubernetes, environment, service) {
match get_stateless_resource_information_for_user(kubernetes, environment, service, event_details.clone()) {
Ok(lines) => lines,
Err(err) => {
error!(
@@ -380,16 +378,21 @@ where
let environment = target.environment;
let workspace_dir = service.workspace_directory();
let tera_context = service.tera_context(target)?;
let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_dir(),
workspace_dir.as_str(),
tera_context,
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_dir(),
workspace_dir.as_str(),
&tera_context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let helm_release_name = service.helm_release_name();
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
@@ -408,48 +411,54 @@ where
});
// create a namespace with labels if do not exists
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_create_namespace(
kubernetes_config_file_path.as_str(),
environment.namespace(),
namespace_labels,
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
crate::cmd::kubectl::kubectl_exec_create_namespace(
kubernetes_config_file_path.as_str(),
environment.namespace(),
namespace_labels,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_create_namespace(event_details.clone(), environment.namespace().to_string(), e)
.to_legacy_engine_error()
})?;
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
service.selector(),
workspace_dir.as_str(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
),
)?;
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
service.selector(),
workspace_dir.as_str(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
)
.map_err(|e| NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error())?;
// check deployment status
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
if helm_history_row.is_none()
|| !helm_history_row
.expect("Error getting helm history row")
.is_successfully_deployed()
{
return Err(thrown_error);
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry(
kubernetes_config_file_path.as_str(),
environment.namespace(),
service.selector().unwrap_or("".to_string()).as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
crate::cmd::kubectl::kubectl_exec_is_pod_ready_with_retry(
kubernetes_config_file_path.as_str(),
environment.namespace(),
service.selector().unwrap_or("".to_string()).as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_pod_not_ready(
event_details.clone(),
service.selector().unwrap_or("".to_string()),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?;
Ok(())
}
@@ -462,33 +471,39 @@ where
let kubernetes = target.kubernetes;
let environment = target.environment;
let helm_release_name = service.helm_release_name();
let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::Deploy));
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
Ok(path) => path,
Err(e) => return Err(e.to_legacy_engine_error()),
};
let history_rows = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::helm::helm_exec_history(
let history_rows = crate::cmd::helm::helm_exec_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_history_error(
event_details.clone(),
helm_release_name.to_string(),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?;
if history_rows.len() == 1 {
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
&kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
if history_rows.len() == 1 {
cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_uninstall_error(event_details.clone(), helm_release_name.to_string(), e)
.to_legacy_engine_error()
})?;
}
Ok(())
@@ -500,10 +515,11 @@ pub fn scale_down_database(
replicas_count: usize,
) -> Result<(), EngineError> {
if service.is_managed_service() {
info!("Doing nothing for pause database as it is a managed service");
// Doing nothing for pause database as it is a managed service
return Ok(());
}
let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::ScaleDown));
let kubernetes = target.kubernetes;
let environment = target.environment;
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
@@ -511,20 +527,25 @@ pub fn scale_down_database(
Err(e) => return Err(e.to_legacy_engine_error()),
};
let scaledown_ret = kubectl_exec_scale_replicas_by_selector(
let selector = format!("databaseId={}", service.id());
kubectl_exec_scale_replicas_by_selector(
kubernetes_config_file_path,
kubernetes.cloud_provider().credentials_environment_variables(),
environment.namespace(),
Statefulset,
format!("databaseId={}", service.id()).as_str(),
selector.as_str(),
replicas_count as u32,
);
cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
scaledown_ret,
)
.map_err(|e| {
NewEngineError::new_k8s_scale_replicas(
event_details.clone(),
selector.to_string(),
environment.namespace().to_string(),
replicas_count as u32,
e,
)
.to_legacy_engine_error()
})
}
pub fn scale_down_application(
@@ -533,6 +554,7 @@ pub fn scale_down_application(
replicas_count: usize,
scaling_kind: ScalingKind,
) -> Result<(), EngineError> {
let event_details = service.get_event_details(Stage::Environment(EnvironmentStep::ScaleDown));
let kubernetes = target.kubernetes;
let environment = target.environment;
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
@@ -540,32 +562,46 @@ pub fn scale_down_application(
Err(e) => return Err(e.to_legacy_engine_error()),
};
let scaledown_ret = kubectl_exec_scale_replicas_by_selector(
kubectl_exec_scale_replicas_by_selector(
kubernetes_config_file_path,
kubernetes.cloud_provider().credentials_environment_variables(),
environment.namespace(),
scaling_kind,
service.selector().unwrap_or("".to_string()).as_str(),
replicas_count as u32,
);
cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
scaledown_ret,
)
.map_err(|e| {
NewEngineError::new_k8s_scale_replicas(
event_details.clone(),
service.selector().unwrap_or("".to_string()),
environment.namespace().to_string(),
replicas_count as u32,
e,
)
.to_legacy_engine_error()
})
}
pub fn delete_router<T>(target: &DeploymentTarget, service: &T, is_error: bool) -> Result<(), EngineError>
pub fn delete_router<T>(
target: &DeploymentTarget,
service: &T,
is_error: bool,
event_details: EventDetails,
) -> Result<(), EngineError>
where
T: Router,
{
send_progress_on_long_task(service, crate::cloud_provider::service::Action::Delete, || {
delete_stateless_service(target, service, is_error)
delete_stateless_service(target, service, is_error, event_details.clone())
})
}
pub fn delete_stateless_service<T>(target: &DeploymentTarget, service: &T, is_error: bool) -> Result<(), EngineError>
pub fn delete_stateless_service<T>(
target: &DeploymentTarget,
service: &T,
is_error: bool,
event_details: EventDetails,
) -> Result<(), EngineError>
where
T: Service + Helm,
{
@@ -578,16 +614,26 @@ where
kubernetes,
environment,
service.selector().unwrap_or("".to_string()).as_str(),
Stage::Environment(EnvironmentStep::Delete),
)?;
}
// clean the resource
let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str())?;
let _ = helm_uninstall_release(
kubernetes,
environment,
helm_release_name.as_str(),
event_details.clone(),
)?;
Ok(())
}
pub fn deploy_stateful_service<T>(target: &DeploymentTarget, service: &T) -> Result<(), EngineError>
pub fn deploy_stateful_service<T>(
target: &DeploymentTarget,
service: &T,
event_details: EventDetails,
) -> Result<(), EngineError>
where
T: StatefulService + Helm + Terraform,
{
@@ -605,44 +651,57 @@ where
let context = service.tera_context(target)?;
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.terraform_common_resource_dir_path(),
&workspace_dir,
context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.terraform_common_resource_dir_path(),
&workspace_dir,
&context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.terraform_resource_dir_path(),
&workspace_dir,
context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.terraform_resource_dir_path(),
workspace_dir.as_str(),
&context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
let external_svc_dir = format!("{}/{}", workspace_dir, "external-name-svc");
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_external_name_service_dir(),
external_svc_dir.as_str(),
context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_external_name_service_dir(),
format!("{}/{}", workspace_dir, "external-name-svc"),
&context,
),
)?;
external_svc_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::terraform::terraform_init_validate_plan_apply(
workspace_dir.as_str(),
service.context().is_dry_run_deploy(),
),
)?;
let _ = crate::cmd::terraform::terraform_init_validate_plan_apply(
workspace_dir.as_str(),
service.context().is_dry_run_deploy(),
)
.map_err(|e| {
NewEngineError::new_terraform_error_while_executing_pipeline(event_details.clone(), e)
.to_legacy_engine_error()
})?;
} else {
// use helm
info!(
@@ -660,26 +719,34 @@ where
};
// default chart
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_dir(),
workspace_dir.as_str(),
context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_dir(),
workspace_dir.as_str(),
&context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
// overwrite with our chart values
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_values_dir(),
workspace_dir.as_str(),
context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_values_dir(),
workspace_dir.as_str(),
&context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
// define labels to add to namespace
let namespace_labels = service.context().resource_expiration_in_seconds().map(|_| {
@@ -692,32 +759,31 @@ where
});
// create a namespace with labels if it does not exist
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_create_namespace(
kubernetes_config_file_path.to_string(),
environment.namespace(),
namespace_labels,
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
crate::cmd::kubectl::kubectl_exec_create_namespace(
kubernetes_config_file_path.to_string(),
environment.namespace(),
namespace_labels,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_create_namespace(event_details.clone(), environment.namespace().to_string(), e)
.to_legacy_engine_error()
})?;
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.to_string(),
environment.namespace(),
service.helm_release_name().as_str(),
service.selector(),
workspace_dir.to_string(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
),
)?;
let helm_history_row = crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.to_string(),
environment.namespace(),
service.helm_release_name().as_str(),
service.selector(),
workspace_dir.to_string(),
service.start_timeout(),
kubernetes.cloud_provider().credentials_environment_variables(),
service.service_type(),
)
.map_err(|e| {
NewEngineError::new_helm_charts_upgrade_error(event_details.clone(), e).to_legacy_engine_error()
})?;
// check deployment status
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
@@ -754,7 +820,11 @@ where
Ok(())
}
pub fn delete_stateful_service<T>(target: &DeploymentTarget, service: &T) -> Result<(), EngineError>
pub fn delete_stateful_service<T>(
target: &DeploymentTarget,
service: &T,
event_details: EventDetails,
) -> Result<(), EngineError>
where
T: StatefulService + Helm + Terraform,
{
@@ -764,45 +834,62 @@ where
let workspace_dir = service.workspace_directory();
let tera_context = service.tera_context(target)?;
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.terraform_common_resource_dir_path(),
workspace_dir.as_str(),
tera_context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.terraform_common_resource_dir_path(),
workspace_dir.as_str(),
&tera_context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.terraform_resource_dir_path(),
workspace_dir.as_str(),
tera_context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.terraform_resource_dir_path(),
workspace_dir.as_str(),
&tera_context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
let external_svc_dir = format!("{}/{}", workspace_dir, "external-name-svc");
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_external_name_service_dir(),
external_svc_dir.to_string(),
tera_context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_external_name_service_dir(),
format!("{}/{}", workspace_dir, "external-name-svc"),
&tera_context,
),
)?;
external_svc_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
let _ = cast_simple_error_to_engine_error(
service.engine_error_scope(),
service.context().execution_id(),
crate::template::generate_and_copy_all_files_into_dir(
if let Err(e) = crate::template::generate_and_copy_all_files_into_dir(
service.helm_chart_external_name_service_dir(),
workspace_dir.as_str(),
tera_context.clone(),
) {
return Err(NewEngineError::new_cannot_copy_files_from_one_directory_to_another(
event_details.clone(),
service.helm_chart_external_name_service_dir(),
workspace_dir.as_str(),
&tera_context,
),
)?;
workspace_dir.to_string(),
e,
)
.to_legacy_engine_error());
}
match crate::cmd::terraform::terraform_init_validate_destroy(workspace_dir.as_str(), true) {
Ok(_) => {
@@ -821,7 +908,12 @@ where
// If not managed, we use helm to deploy
let helm_release_name = service.helm_release_name();
// clean the resource
let _ = helm_uninstall_release(kubernetes, environment, helm_release_name.as_str())?;
let _ = helm_uninstall_release(
kubernetes,
environment,
helm_release_name.as_str(),
event_details.clone(),
)?;
}
Ok(())
@@ -919,11 +1011,12 @@ pub fn check_kubernetes_service_error<T>(
result: Result<(), EngineError>,
kubernetes: &dyn Kubernetes,
service: &Box<T>,
event_details: EventDetails,
deployment_target: &DeploymentTarget,
listeners_helper: &ListenersHelper,
action_verb: &str,
action: CheckAction,
) -> Result<(), EngineError>
) -> Result<(), NewEngineError>
where
T: Service + ?Sized,
{
@@ -975,7 +1068,7 @@ where
CheckAction::Delete => listeners_helper.delete_error(progress_info),
}
let debug_logs = service.debug_logs(deployment_target);
let debug_logs = service.debug_logs(deployment_target, event_details.clone());
let debug_logs_string = if !debug_logs.is_empty() {
debug_logs.join("\n")
} else {
@@ -997,7 +1090,10 @@ where
CheckAction::Delete => listeners_helper.delete_error(progress_info),
}
return Err(err);
return Err(NewEngineError::new_k8s_service_issue(
event_details.clone(),
CommandError::new(err.message.unwrap_or("No error message.".to_string()), None),
));
}
_ => {
let progress_info = ProgressInfo::new(
@@ -1032,6 +1128,7 @@ pub fn get_stateless_resource_information_for_user<T>(
kubernetes: &dyn Kubernetes,
environment: &Environment,
service: &T,
event_details: EventDetails,
) -> Result<Vec<String>, EngineError>
where
T: Service + ?Sized,
@@ -1044,32 +1141,33 @@ where
};
// get logs
let logs = cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_logs(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector.as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
),
let logs = crate::cmd::kubectl::kubectl_exec_logs(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector.as_str(),
kubernetes.cloud_provider().credentials_environment_variables(),
)
.unwrap_or_else(|_| vec![format!("Unable to retrieve logs for pod with selector: {:?}", selector,)]);
.map_err(|e| {
NewEngineError::new_k8s_get_logs_error(
event_details.clone(),
selector.to_string(),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?;
let _ = result.extend(logs);
// get pod state
let pods = cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_get_pods(
kubernetes_config_file_path.to_string(),
Some(environment.namespace()),
Some(selector.as_str()),
kubernetes.cloud_provider().credentials_environment_variables(),
),
let pods = crate::cmd::kubectl::kubectl_exec_get_pods(
kubernetes_config_file_path.to_string(),
Some(environment.namespace()),
Some(selector.as_str()),
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_or_else(|_| vec![], |pods| pods.items);
.map_err(|e| NewEngineError::new_k8s_cannot_get_pods(event_details.clone(), e).to_legacy_engine_error())?
.items;
for pod in pods {
for container_condition in pod.status.conditions {
@@ -1102,16 +1200,16 @@ where
}
// get pod events
let events = cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_get_json_events(
&kubernetes_config_file_path,
environment.namespace(),
kubernetes.cloud_provider().credentials_environment_variables(),
),
let events = crate::cmd::kubectl::kubectl_exec_get_json_events(
&kubernetes_config_file_path,
environment.namespace(),
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_or_else(|_| vec![], |events| events.items);
.map_err(|e| {
NewEngineError::new_k8s_get_json_events(event_details.clone(), environment.namespace().to_string(), e)
.to_legacy_engine_error()
})?
.items;
for event in events {
if event.type_.to_lowercase() != "normal" {
@@ -1135,53 +1233,47 @@ pub fn get_stateless_resource_information(
kubernetes: &dyn Kubernetes,
environment: &Environment,
selector: &str,
stage: Stage,
) -> Result<(Describe, Logs), EngineError> {
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
Ok(path) => path,
Err(e) => return Err(e.to_legacy_engine_error()),
};
let event_details = kubernetes.get_event_details(stage);
let kubernetes_config_file_path = kubernetes
.get_kubeconfig_file_path()
.map_err(|e| e.to_legacy_engine_error())?;
// exec describe pod...
let describe = match cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_describe_pod(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector,
kubernetes.cloud_provider().credentials_environment_variables(),
),
) {
Ok(output) => {
info!("{}", output);
output
}
Err(err) => {
error!("{:?}", err);
return Err(err);
}
};
let describe = crate::cmd::kubectl::kubectl_exec_describe_pod(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_describe(
event_details.clone(),
selector.to_string(),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?;
// exec logs...
let logs = match cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_logs(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector,
kubernetes.cloud_provider().credentials_environment_variables(),
),
) {
Ok(output) => {
info!("{:?}", output);
output.join("\n")
}
Err(err) => {
error!("{:?}", err);
return Err(err);
}
};
let logs = crate::cmd::kubectl::kubectl_exec_logs(
kubernetes_config_file_path.to_string(),
environment.namespace(),
selector,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_get_logs_error(
event_details.clone(),
selector.to_string(),
environment.namespace().to_string(),
e,
)
.to_legacy_engine_error()
})?
.join("\n");
Ok((describe, logs))
}
@@ -1190,37 +1282,37 @@ pub fn helm_uninstall_release(
kubernetes: &dyn Kubernetes,
environment: &Environment,
helm_release_name: &str,
event_details: EventDetails,
) -> Result<(), EngineError> {
let kubernetes_config_file_path = match kubernetes.get_kubeconfig_file_path() {
Ok(p) => p,
Err(e) => return Err(e.to_legacy_engine_error()),
};
let kubernetes_config_file_path = kubernetes
.get_kubeconfig_file_path()
.map_err(|e| e.to_legacy_engine_error())?;
let history_rows = cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::helm::helm_exec_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
&kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
let history_rows = crate::cmd::helm::helm_exec_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
&kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_k8s_history(event_details.clone(), environment.namespace().to_string(), e)
.to_legacy_engine_error()
})?;
// if there is no valid history - then delete the helm chart
let first_valid_history_row = history_rows.iter().find(|x| x.is_successfully_deployed());
if first_valid_history_row.is_some() {
cast_simple_error_to_engine_error(
kubernetes.engine_error_scope(),
kubernetes.context().execution_id(),
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
crate::cmd::helm::helm_exec_uninstall(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name,
kubernetes.cloud_provider().credentials_environment_variables(),
)
.map_err(|e| {
NewEngineError::new_helm_chart_uninstall_error(event_details.clone(), helm_release_name.to_string(), e)
.to_legacy_engine_error()
})?;
}
Ok(())

View File

@@ -10,6 +10,7 @@ use crate::cmd::kubectl::{kubectl_exec_delete_secret, kubectl_exec_get_secrets};
use crate::cmd::structs::{HelmChart, HelmHistoryRow, HelmListItem, Secrets};
use crate::cmd::utilities::QoveryCommand;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use chrono::{DateTime, Duration, Utc};
use core::time;
use retry::delay::Fixed;
@@ -45,7 +46,7 @@ pub fn helm_exec_with_upgrade_history<P>(
timeout: Timeout<u32>,
envs: Vec<(&str, &str)>,
service_type: ServiceType,
) -> Result<Option<HelmHistoryRow>, SimpleError>
) -> Result<Option<HelmHistoryRow>, CommandError>
where
P: AsRef<Path>,
{
@@ -106,7 +107,7 @@ pub fn helm_destroy_chart_if_breaking_changes_version_detected(
kubernetes_config: &Path,
environment_variables: &Vec<(&str, &str)>,
chart_info: &ChartInfo,
) -> Result<(), SimpleError> {
) -> Result<(), CommandError> {
// If there is a breaking version set for the current helm chart,
// then we compare this breaking version with the currently installed version if any.
// If current installed version is older than breaking change one, then we delete
@@ -137,7 +138,7 @@ pub fn helm_exec_upgrade_with_chart_info<P>(
kubernetes_config: P,
envs: &Vec<(&str, &str)>,
chart: &ChartInfo,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -197,13 +198,16 @@ where
};
// no need to validate yaml as it will be done by helm
if let Err(e) = file_create() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while writing yaml content to file {}\n{}\n{}",
&file_path, value_file.yaml_content, e
)),
});
let safe_message = format!("Error while writing yaml content to file `{}`", &file_path);
return Err(CommandError::new(
format!(
"{}\nContent\n{}\nError: {}",
safe_message.to_string(),
value_file.yaml_content,
e
),
Some(safe_message.to_string()),
));
};
args_string.push("-f".to_string());
@@ -275,7 +279,7 @@ where
envs.clone(),
) {
Ok(_) => info!("Helm lock detected and cleaned"),
Err(e) => warn!("Couldn't cleanup Helm lock. {:?}", e.message),
Err(e) => warn!("Couldn't cleanup Helm lock. {:?}", e.message()),
}
}
@@ -287,14 +291,19 @@ where
OperationResult::Ok(())
}
}
Err(e) => OperationResult::Retry(e),
Err(e) => OperationResult::Retry(SimpleError::new(SimpleErrorKind::Other, Some(e.message()))),
}
});
match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(e)) => return Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(Operation { error, .. }) => {
return Err(CommandError::new(
error.message.unwrap_or("No error message".to_string()),
None,
));
}
Err(retry::Error::Internal(e)) => return Err(CommandError::new(e, None)),
}
}
@@ -304,7 +313,7 @@ pub fn clean_helm_lock<P>(
release_name: &str,
timeout: i64,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -322,22 +331,13 @@ where
match helm_get_secret_lock_name(&result, timeout_i64.clone()) {
Ok(x) => return OperationResult::Ok(x),
Err(e) => match e.kind {
ParsingError => OperationResult::Retry(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(e.message),
}),
IncorrectFormatDate => OperationResult::Retry(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(e.message),
}),
ParsingError => OperationResult::Retry(CommandError::new(e.message, None)),
IncorrectFormatDate => OperationResult::Retry(CommandError::new(e.message, None)),
NotYetExpired => {
if e.wait_before_release_lock.is_none() {
return OperationResult::Retry(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(
"missing helm time to wait information, before releasing the lock".to_string(),
),
});
return OperationResult::Retry(CommandError::new_from_safe_message(
"Missing helm time to wait information, before releasing the lock".to_string(),
));
};
let time_to_wait = e.wait_before_release_lock.unwrap() as u64;
@@ -346,19 +346,13 @@ where
info!("waiting {}s before retrying the deployment...", time_to_wait);
thread::sleep(time::Duration::from_secs(time_to_wait));
} else {
return OperationResult::Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(e.message),
});
return OperationResult::Err(CommandError::new(e.message, None));
}
// retrieve now the secret
match helm_get_secret_lock_name(&result, timeout_i64.clone()) {
Ok(x) => OperationResult::Ok(x),
Err(e) => OperationResult::Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(e.message),
}),
Err(e) => OperationResult::Err(CommandError::new(e.message, None)),
}
}
},
@@ -368,14 +362,11 @@ where
match result {
Err(err) => {
return match err {
retry::Error::Operation { .. } => Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"internal error while trying to deploy helm chart {}",
release_name
)),
}),
retry::Error::Internal(err) => Err(SimpleError::new(SimpleErrorKind::Other, Some(err))),
retry::Error::Operation { .. } => Err(CommandError::new_from_safe_message(format!(
"internal error while trying to deploy helm chart {}",
release_name
))),
retry::Error::Internal(err) => Err(CommandError::new_from_safe_message(err)),
}
}
Ok(x) => {
@@ -451,7 +442,7 @@ pub fn helm_exec_uninstall_with_chart_info<P>(
kubernetes_config: P,
envs: &Vec<(&str, &str)>,
chart: &ChartInfo,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -475,7 +466,7 @@ pub fn helm_exec_uninstall<P>(
namespace: &str,
release_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -499,12 +490,12 @@ pub fn helm_exec_history<P>(
namespace: &str,
release_name: &str,
envs: &Vec<(&str, &str)>,
) -> Result<Vec<HelmHistoryRow>, SimpleError>
) -> Result<Vec<HelmHistoryRow>, CommandError>
where
P: AsRef<Path>,
{
let mut output_string = String::new();
match helm_exec_with_output(
helm_exec_with_output(
// WARN: do not add argument --debug, otherwise JSON decoding will not work
vec![
"history",
@@ -525,10 +516,8 @@ where
error!("{}", line)
}
},
) {
Ok(_) => info!("Helm history success for release name: {}", release_name),
Err(_) => info!("Helm history found for release name: {}", release_name),
};
)?;
// TODO better check, release not found
let mut results = match serde_json::from_str::<Vec<HelmHistoryRow>>(output_string.as_str()) {
@@ -589,7 +578,7 @@ pub fn helm_exec_upgrade_with_override_file<P>(
chart_root_dir: P,
override_file: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -630,7 +619,7 @@ pub fn helm_exec_with_upgrade_history_with_override<P>(
chart_root_dir: P,
override_file: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<HelmHistoryRow>, SimpleError>
) -> Result<Option<HelmHistoryRow>, CommandError>
where
P: AsRef<Path>,
{
@@ -670,7 +659,7 @@ pub fn is_chart_deployed<P>(
envs: Vec<(&str, &str)>,
namespace: Option<&str>,
chart_name: String,
) -> Result<bool, SimpleError>
) -> Result<bool, CommandError>
where
P: AsRef<Path>,
{
@@ -719,7 +708,7 @@ pub fn helm_list<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
namespace: Option<&str>,
) -> Result<Vec<HelmChart>, SimpleError>
) -> Result<Vec<HelmChart>, CommandError>
where
P: AsRef<Path>,
{
@@ -755,9 +744,11 @@ where
}
}
Err(e) => {
let message = format!("Error while deserializing all helms names {}", e);
error!("{}", message.as_str());
return Err(SimpleError::new(SimpleErrorKind::Other, Some(message)));
let message_safe = "Error while deserializing all helms names";
return Err(CommandError::new(
format!("{}, error: {}", message_safe, e),
Some(message_safe.to_string()),
));
}
}
@@ -768,7 +759,7 @@ pub fn helm_upgrade_diff_with_chart_info<P>(
kubernetes_config: P,
envs: &Vec<(String, String)>,
chart: &ChartInfo,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -805,13 +796,16 @@ where
};
// no need to validate yaml as it will be done by helm
if let Err(e) = file_create() {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!(
"error while writing yaml content to file {}\n{}\n{}",
&file_path, value_file.yaml_content, e
)),
});
let safe_message = format!("Error while writing yaml content to file `{}`", &file_path);
return Err(CommandError::new(
format!(
"{}\nContent\n{}\nError: {}",
safe_message.to_string(),
value_file.yaml_content,
e
),
Some(safe_message.to_string()),
));
};
args_string.push("-f".to_string());
@@ -833,7 +827,7 @@ where
)
}
pub fn helm_exec(args: Vec<&str>, envs: Vec<(&str, &str)>) -> Result<(), SimpleError> {
pub fn helm_exec(args: Vec<&str>, envs: Vec<(&str, &str)>) -> Result<(), CommandError> {
helm_exec_with_output(
args,
envs,
@@ -851,7 +845,7 @@ pub fn helm_exec_with_output<F, X>(
envs: Vec<(&str, &str)>,
stdout_output: F,
stderr_output: X,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
F: FnMut(String),
X: FnMut(String),
@@ -861,7 +855,7 @@ where
// It means that the command successfully ran, but it didn't terminate as expected
let mut cmd = QoveryCommand::new("helm", &args, &envs);
match cmd.exec_with_timeout(Duration::max_value(), stdout_output, stderr_output) {
Err(err) => Err(SimpleError::new(SimpleErrorKind::Other, Some(format!("{}", err)))),
Err(err) => Err(CommandError::new(format!("{:?}", err), None)),
_ => Ok(()),
}
}

View File

@@ -15,6 +15,7 @@ use crate::cmd::structs::{
use crate::cmd::utilities::QoveryCommand;
use crate::constants::KUBECONFIG;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
pub enum ScalingKind {
Deployment,
@@ -33,7 +34,7 @@ pub fn kubectl_exec_with_output<F, X>(
envs: Vec<(&str, &str)>,
stdout_output: F,
stderr_output: X,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
F: FnMut(String),
X: FnMut(String),
@@ -44,7 +45,14 @@ where
let args_string = args.join(" ");
let msg = format!("Error on command: kubectl {}. {:?}", args_string, &err);
error!("{}", &msg);
return Err(SimpleError::new(SimpleErrorKind::Other, Some(msg)));
return Err(CommandError::new_from_command_line(
"Error while executing a kubectl command.".to_string(),
"kubectl".to_string(),
args.into_iter().map(|a| a.to_string()).collect(),
envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect(),
None,
None,
));
};
Ok(())
@@ -55,7 +63,7 @@ pub fn kubectl_exec_get_number_of_restart<P>(
namespace: &str,
pod_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<String, SimpleError>
) -> Result<String, CommandError>
where
P: AsRef<Path>,
{
@@ -86,32 +94,38 @@ pub fn do_kubectl_exec_describe_service<P>(
kubernetes_config: P,
namespace: &str,
service_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<DoLoadBalancer, SimpleError>
input_envs: Vec<(&str, &str)>,
) -> Result<DoLoadBalancer, CommandError>
where
P: AsRef<Path>,
{
let mut _envs = Vec::with_capacity(envs.len() + 1);
_envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap()));
_envs.extend(envs);
let mut envs = Vec::with_capacity(input_envs.len() + 1);
envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap()));
envs.extend(input_envs);
let mut output_vec: Vec<String> = Vec::with_capacity(20);
let mut err_output_vec: Vec<String> = Vec::with_capacity(20);
let cmd_args = vec!["get", "svc", "-n", namespace, service_name, "-o", "json"];
let _ = kubectl_exec_with_output(
vec!["get", "svc", "-n", namespace, service_name, "-o", "json"],
_envs,
cmd_args.clone(),
envs.clone(),
|line| output_vec.push(line),
|line| error!("{}", line),
|line| err_output_vec.push(line),
)?;
let output_string: String = output_vec.join("\n");
let err_output_string: String = output_vec.join("\n");
match serde_json::from_str::<DoLoadBalancer>(output_string.as_str()) {
Ok(x) => Ok(x),
Err(err) => {
error!("{:?}", err);
error!("{}", output_string.as_str());
Err(SimpleError::new(SimpleErrorKind::Other, Some(output_string)))
}
Err(err) => Err(CommandError::new_from_command_line(
format!("Error while executing kubectl command: {:?}", err),
"kubectl".to_string(),
cmd_args.into_iter().map(|a| a.to_string()).collect(),
envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect(),
Some(output_string.to_string()),
Some(err_output_string.to_string()),
)),
}
}
@@ -121,7 +135,7 @@ pub fn do_kubectl_exec_get_external_ingress_ip<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<String>, SimpleError>
) -> Result<Option<String>, CommandError>
where
P: AsRef<Path>,
{
@@ -142,7 +156,7 @@ pub fn do_kubectl_exec_get_loadbalancer_id<P>(
namespace: &str,
service_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<String>, SimpleError>
) -> Result<Option<String>, CommandError>
where
P: AsRef<Path>,
{
@@ -169,7 +183,7 @@ pub fn kubectl_exec_get_external_ingress_hostname<P>(
namespace: &str,
name: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<String>, SimpleError>
) -> Result<Option<String>, CommandError>
where
P: AsRef<Path>,
{
@@ -193,7 +207,7 @@ pub fn kubectl_exec_is_pod_ready_with_retry<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<bool>, SimpleError>
) -> Result<Option<bool>, CommandError>
where
P: AsRef<Path>,
{
@@ -225,7 +239,7 @@ where
total_delay: _,
tries: _,
} => Ok(Some(false)),
retry::Error::Internal(err) => Err(SimpleError::new(SimpleErrorKind::Other, Some(err))),
retry::Error::Internal(err) => Err(CommandError::new_from_safe_message(err)),
},
Ok(_) => Ok(Some(true)),
}
@@ -236,7 +250,7 @@ pub fn kubectl_exec_get_secrets<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<Secrets, SimpleError>
) -> Result<Secrets, CommandError>
where
P: AsRef<Path>,
{
@@ -262,7 +276,7 @@ pub fn kubectl_exec_is_pod_ready<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<bool>, SimpleError>
) -> Result<Option<bool>, CommandError>
where
P: AsRef<Path>,
{
@@ -326,7 +340,7 @@ pub fn kubectl_exec_is_job_ready<P>(
namespace: &str,
job_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<Option<bool>, SimpleError>
) -> Result<Option<bool>, CommandError>
where
P: AsRef<Path>,
{
@@ -370,7 +384,7 @@ pub fn kubectl_exec_create_namespace<P>(
namespace: &str,
labels: Option<Vec<LabelsContent>>,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -405,22 +419,21 @@ pub fn kubectl_add_labels_to_namespace<P>(
namespace: &str,
labels: Vec<LabelsContent>,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
if labels.is_empty() {
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some("No labels were defined, can't set them"),
return Err(CommandError::new_from_safe_message(
"No labels were defined, can't set them".to_string(),
));
};
if !kubectl_exec_is_namespace_present(kubernetes_config.as_ref(), namespace, envs.clone()) {
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format! {"Can't set labels on namespace {} because it doesn't exists", namespace}),
));
return Err(CommandError::new_from_safe_message(format!(
"Can't set labels on namespace {} because it doesn't exists",
namespace
)));
}
let mut command_args = Vec::new();
@@ -447,7 +460,7 @@ pub fn does_contain_terraform_tfstate<P>(
kubernetes_config: P,
namespace: &str,
envs: &Vec<(&str, &str)>,
) -> Result<bool, SimpleError>
) -> Result<bool, CommandError>
where
P: AsRef<Path>,
{
@@ -484,7 +497,7 @@ where
pub fn kubectl_exec_get_all_namespaces<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
) -> Result<Vec<String>, SimpleError>
) -> Result<Vec<String>, CommandError>
where
P: AsRef<Path>,
{
@@ -509,25 +522,15 @@ pub fn kubectl_exec_delete_namespace<P>(
kubernetes_config: P,
namespace: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
match does_contain_terraform_tfstate(&kubernetes_config, &namespace, &envs) {
Ok(exist) => match exist {
true => {
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some("Namespace contains terraform tfstates in secret, can't delete it !"),
));
}
false => info!(
"Namespace {} doesn't contain any tfstates, able to delete it",
namespace
),
},
Err(_) => debug!("Unable to execute describe on secrets. it may not exist anymore"),
};
if does_contain_terraform_tfstate(&kubernetes_config, &namespace, &envs)? {
return Err(CommandError::new_from_safe_message(
"Namespace contains terraform tfstates in secret, can't delete it !".to_string(),
));
}
let mut _envs = Vec::with_capacity(envs.len() + 1);
_envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap()));
@@ -547,7 +550,7 @@ pub fn kubectl_exec_delete_crd<P>(
kubernetes_config: P,
crd_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -570,7 +573,7 @@ pub fn kubectl_exec_delete_secret<P>(
namespace: &str,
secret: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -593,7 +596,7 @@ pub fn kubectl_exec_logs<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<Vec<String>, SimpleError>
) -> Result<Vec<String>, CommandError>
where
P: AsRef<Path>,
{
@@ -617,7 +620,7 @@ pub fn kubectl_exec_describe_pod<P>(
namespace: &str,
selector: &str,
envs: Vec<(&str, &str)>,
) -> Result<String, SimpleError>
) -> Result<String, CommandError>
where
P: AsRef<Path>,
{
@@ -636,7 +639,7 @@ where
Ok(output_vec.join("\n"))
}
pub fn kubectl_exec_version<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<KubernetesVersion, SimpleError>
pub fn kubectl_exec_version<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<KubernetesVersion, CommandError>
where
P: AsRef<Path>,
{
@@ -649,7 +652,7 @@ pub fn kubectl_exec_get_daemonset<P>(
namespace: &str,
selectors: Option<&str>,
envs: Vec<(&str, &str)>,
) -> Result<Daemonset, SimpleError>
) -> Result<Daemonset, CommandError>
where
P: AsRef<Path>,
{
@@ -672,7 +675,7 @@ pub fn kubectl_exec_rollout_restart_deployment<P>(
name: &str,
namespace: &str,
envs: &Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -691,7 +694,7 @@ where
pub fn kubectl_exec_get_node<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesList<KubernetesNode>, SimpleError>
) -> Result<KubernetesList<KubernetesNode>, CommandError>
where
P: AsRef<Path>,
{
@@ -702,7 +705,7 @@ pub fn kubectl_exec_count_all_objects<P>(
kubernetes_config: P,
object_kind: &str,
envs: Vec<(&str, &str)>,
) -> Result<usize, SimpleError>
) -> Result<usize, CommandError>
where
P: AsRef<Path>,
{
@@ -721,7 +724,7 @@ pub fn kubectl_exec_get_pods<P>(
namespace: Option<&str>,
selector: Option<&str>,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesList<KubernetesPod>, SimpleError>
) -> Result<KubernetesList<KubernetesPod>, CommandError>
where
P: AsRef<Path>,
{
@@ -756,7 +759,7 @@ pub fn kubectl_exec_get_pod_by_name<P>(
namespace: Option<&str>,
pod_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesPod, SimpleError>
) -> Result<KubernetesPod, CommandError>
where
P: AsRef<Path>,
{
@@ -780,7 +783,7 @@ pub fn kubectl_exec_get_configmap<P>(
namespace: &str,
name: &str,
envs: Vec<(&str, &str)>,
) -> Result<Configmap, SimpleError>
) -> Result<Configmap, CommandError>
where
P: AsRef<Path>,
{
@@ -795,7 +798,7 @@ pub fn kubectl_exec_get_json_events<P>(
kubernetes_config: P,
namespace: &str,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesList<KubernetesEvent>, SimpleError>
) -> Result<KubernetesList<KubernetesEvent>, CommandError>
where
P: AsRef<Path>,
{
@@ -811,7 +814,7 @@ pub fn kubectl_exec_get_events<P>(
kubernetes_config: P,
namespace: Option<&str>,
envs: Vec<(&str, &str)>,
) -> Result<String, SimpleError>
) -> Result<String, CommandError>
where
P: AsRef<Path>,
{
@@ -836,7 +839,7 @@ pub fn kubectl_delete_objects_in_all_namespaces<P>(
kubernetes_config: P,
object: &str,
envs: Vec<(&str, &str)>,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -849,15 +852,11 @@ where
match result {
Ok(_) => Ok(()),
Err(e) => {
match &e.message {
Some(message) => {
if message.contains("No resources found") || message.ends_with(" deleted") {
return Ok(());
}
}
None => {}
};
Err(e)
let lower_case_message = e.message().to_lowercase();
if lower_case_message.contains("no resources found") || lower_case_message.ends_with(" deleted") {
return Ok(());
}
return Err(e);
}
}
}
@@ -877,7 +876,7 @@ pub fn kubectl_exec_api_custom_metrics<P>(
namespace: &str,
specific_pod_name: Option<&str>,
metric_name: &str,
) -> Result<KubernetesApiMetrics, SimpleError>
) -> Result<KubernetesApiMetrics, CommandError>
where
P: AsRef<Path>,
{
@@ -906,7 +905,7 @@ pub fn kubectl_exec_scale_replicas<P>(
kind: ScalingKind,
name: &str,
replicas_count: u32,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -952,7 +951,7 @@ pub fn kubectl_exec_scale_replicas_by_selector<P>(
kind: ScalingKind,
selector: &str,
replicas_count: u32,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -993,7 +992,6 @@ where
0 => PodCondition::Delete,
_ => PodCondition::Ready,
};
info!("waiting for the pods to get the expected status: {:?}", &condition);
kubectl_exec_wait_for_pods_condition(kubernetes_config, envs, namespace, selector, condition)
}
@@ -1003,7 +1001,7 @@ pub fn kubectl_exec_wait_for_pods_condition<P>(
namespace: &str,
selector: &str,
condition: PodCondition,
) -> Result<(), SimpleError>
) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -1036,7 +1034,7 @@ where
)
}
pub fn kubectl_get_pvc<P>(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result<PVC, SimpleError>
pub fn kubectl_get_pvc<P>(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result<PVC, CommandError>
where
P: AsRef<Path>,
{
@@ -1047,7 +1045,7 @@ where
)
}
pub fn kubectl_get_svc<P>(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result<SVC, SimpleError>
pub fn kubectl_get_svc<P>(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> Result<SVC, CommandError>
where
P: AsRef<Path>,
{
@@ -1071,7 +1069,7 @@ pub fn kubectl_delete_crash_looping_pods<P>(
namespace: Option<&str>,
selector: Option<&str>,
envs: Vec<(&str, &str)>,
) -> Result<Vec<KubernetesPod>, SimpleError>
) -> Result<Vec<KubernetesPod>, CommandError>
where
P: AsRef<Path>,
{
@@ -1110,7 +1108,7 @@ pub fn kubectl_get_crash_looping_pods<P>(
selector: Option<&str>,
restarted_min_count: Option<usize>,
envs: Vec<(&str, &str)>,
) -> Result<Vec<KubernetesPod>, SimpleError>
) -> Result<Vec<KubernetesPod>, CommandError>
where
P: AsRef<Path>,
{
@@ -1154,7 +1152,7 @@ pub fn kubectl_exec_delete_pod<P>(
pod_namespace: &str,
pod_name: &str,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesPod, SimpleError>
) -> Result<KubernetesPod, CommandError>
where
P: AsRef<Path>,
{
@@ -1181,11 +1179,11 @@ where
|_| {},
) {
Ok(_) => Ok(pod_to_be_deleted),
Err(e) => Err(e),
Err(e) => Err(CommandError::new(e.message(), None)),
}
}
fn kubectl_exec<P, T>(args: Vec<&str>, kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<T, SimpleError>
fn kubectl_exec<P, T>(args: Vec<&str>, kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<T, CommandError>
where
P: AsRef<Path>,
T: DeserializeOwned,
@@ -1214,15 +1212,17 @@ where
env_vars_in_vec.push(x.1.to_string());
});
let environment_variables = env_vars_in_vec.join(" ");
error!(
"json parsing error on {:?} on command: {} kubectl {}. {:?}",
std::any::type_name::<T>(),
environment_variables,
args_string,
err
);
error!("{}", output_string.as_str());
return Err(SimpleError::new(SimpleErrorKind::Other, Some(output_string)));
return Err(CommandError::new(
format!(
"JSON parsing error on {:?} on command: {} kubectl {}, output: {}. {:?}",
std::any::type_name::<T>(),
environment_variables,
args_string,
output_string,
err
),
Some("JSON parsing error on kubectl command.".to_string()),
));
}
};
@@ -1233,7 +1233,7 @@ pub fn kubernetes_get_all_pdbs<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
namespace: Option<&str>,
) -> Result<PDB, SimpleError>
) -> Result<PDB, CommandError>
where
P: AsRef<Path>,
{
@@ -1254,7 +1254,7 @@ pub fn kubernetes_get_all_hpas<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
namespace: Option<&str>,
) -> Result<HPA, SimpleError>
) -> Result<HPA, CommandError>
where
P: AsRef<Path>,
{

View File

@@ -363,6 +363,7 @@ pub struct HelmListItem {
pub app_version: String,
}
#[derive(Clone, PartialEq)]
pub struct HelmChart {
pub name: String,
pub namespace: String,

View File

@@ -4,55 +4,46 @@ use retry::OperationResult;
use crate::cmd::utilities::QoveryCommand;
use crate::constants::TF_PLUGIN_CACHE_DIR;
use crate::error::SimpleErrorKind::Other;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use rand::Rng;
use retry::Error::Operation;
use std::{env, fs, thread, time};
fn manage_common_issues(terraform_provider_lock: &String, err: &SimpleError) -> bool {
fn manage_common_issues(terraform_provider_lock: &String, err: &CommandError) -> Result<(), CommandError> {
// Error: Failed to install provider from shared cache
// in order to avoid lock errors on parallel run, let's sleep a bit
// https://github.com/hashicorp/terraform/issues/28041
let mut found_managed_issue = false;
debug!("{:?}", err);
match &err.message {
None => warn!("no know method to fix this Terraform issue"),
Some(message) => {
if message.contains("Failed to install provider from shared cache")
|| message.contains("Failed to install provider")
{
found_managed_issue = true;
let sleep_time_int = rand::thread_rng().gen_range(20..45);
let sleep_time = time::Duration::from_secs(sleep_time_int);
if err.message().contains("Failed to install provider from shared cache")
|| err.message().contains("Failed to install provider")
{
let sleep_time_int = rand::thread_rng().gen_range(20..45);
let sleep_time = time::Duration::from_secs(sleep_time_int);
warn!(
"failed to install provider from shared cache, cleaning and sleeping {} before retrying...",
sleep_time_int
);
thread::sleep(sleep_time);
// failed to install provider from shared cache, cleaning and sleeping before retrying...",
thread::sleep(sleep_time);
match fs::remove_file(&terraform_provider_lock) {
Ok(_) => info!("terraform lock file {} has been removed", &terraform_provider_lock),
Err(e) => error!(
"wasn't able to delete terraform lock file {}: {}",
&terraform_provider_lock, e
),
}
}
return match fs::remove_file(&terraform_provider_lock) {
Ok(_) => Ok(()),
Err(e) => Err(CommandError::new(
format!("Wasn't able to delete terraform lock file {}", &terraform_provider_lock),
Some(format!(
"Wasn't able to delete terraform lock file {}, error: {:?}",
&terraform_provider_lock, e
)),
)),
};
} else if err.message().contains("Plugin reinitialization required") {
// terraform init is required
return Ok(());
}
if message.contains("Plugin reinitialization required") {
warn!("terraform init is required");
found_managed_issue = true;
}
}
};
found_managed_issue
Err(CommandError::new_from_safe_message(
"Not known method to fix this Terraform issue".to_string(),
))
}
fn terraform_init_validate(root_dir: &str) -> Result<(), SimpleError> {
fn terraform_init_validate(root_dir: &str) -> Result<(), CommandError> {
let terraform_provider_lock = format!("{}/.terraform.lock.hcl", &root_dir);
let result = retry::retry(Fixed::from_millis(3000).take(5), || {
@@ -60,8 +51,8 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), SimpleError> {
match terraform_exec(root_dir, vec!["init", "-no-color"]) {
Ok(_) => OperationResult::Ok(()),
Err(err) => {
manage_common_issues(&terraform_provider_lock, &err);
error!("error while trying to run terraform init, retrying...");
let _ = manage_common_issues(&terraform_provider_lock, &err);
// Error while trying to run terraform init, retrying...
OperationResult::Retry(err)
}
};
@@ -70,8 +61,8 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), SimpleError> {
match terraform_exec(root_dir, vec!["validate", "-no-color"]) {
Ok(_) => OperationResult::Ok(()),
Err(err) => {
manage_common_issues(&terraform_provider_lock, &err);
error!("error while trying to Terraform validate on the rendered templates");
let _ = manage_common_issues(&terraform_provider_lock, &err);
// error while trying to Terraform validate on the rendered templates
OperationResult::Retry(err)
}
}
@@ -80,11 +71,12 @@ fn terraform_init_validate(root_dir: &str) -> Result<(), SimpleError> {
match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(e)) => return Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(retry::Error::Internal(e)) => return Err(CommandError::new(e, None)),
}
}
pub fn terraform_init_validate_plan_apply(root_dir: &str, dry_run: bool) -> Result<(), SimpleError> {
pub fn terraform_init_validate_plan_apply(root_dir: &str, dry_run: bool) -> Result<(), CommandError> {
// terraform init
if let Err(e) = terraform_init_validate(root_dir) {
return Err(e);
}
@@ -95,7 +87,7 @@ pub fn terraform_init_validate_plan_apply(root_dir: &str, dry_run: bool) -> Resu
match terraform_exec(root_dir, vec!["plan", "-no-color", "-out", "tf_plan"]) {
Ok(out) => OperationResult::Ok(out),
Err(err) => {
error!("While trying to Terraform plan the rendered templates");
// Error while trying to Terraform plan the rendered templates
OperationResult::Retry(err)
}
}
@@ -104,17 +96,14 @@ pub fn terraform_init_validate_plan_apply(root_dir: &str, dry_run: bool) -> Resu
return match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)),
};
}
match terraform_plan_apply(root_dir) {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
terraform_plan_apply(root_dir)
}
pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy: bool) -> Result<(), SimpleError> {
pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy: bool) -> Result<(), CommandError> {
// terraform init
if let Err(e) = terraform_init_validate(root_dir) {
return Err(e);
@@ -122,10 +111,7 @@ pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy:
// better to apply before destroy to ensure terraform destroy will delete on all resources
if run_apply_before_destroy {
match terraform_plan_apply(root_dir) {
Ok(_) => {}
Err(e) => return Err(e),
}
terraform_plan_apply(root_dir)?;
}
// terraform destroy
@@ -133,7 +119,7 @@ pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy:
match terraform_exec(root_dir, vec!["destroy", "-no-color", "-auto-approve"]) {
Ok(out) => OperationResult::Ok(out),
Err(err) => {
error!("error while trying to run terraform destroy on rendered templates, retrying...");
// Error while trying to run terraform destroy on rendered templates, retrying...
OperationResult::Retry(err)
}
}
@@ -142,25 +128,22 @@ pub fn terraform_init_validate_destroy(root_dir: &str, run_apply_before_destroy:
match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)),
}
}
fn terraform_plan_apply(root_dir: &str) -> Result<(), SimpleError> {
fn terraform_plan_apply(root_dir: &str) -> Result<(), CommandError> {
let result = retry::retry(Fixed::from_millis(3000).take(5), || {
// plan
match terraform_exec(root_dir, vec!["plan", "-no-color", "-out", "tf_plan"]) {
Ok(_) => {}
Err(err) => {
error!("While trying to Terraform plan the rendered templates");
return OperationResult::Retry(err);
}
};
if let Err(err) = terraform_exec(root_dir, vec!["plan", "-no-color", "-out", "tf_plan"]) {
// Error while trying to Terraform plan the rendered templates
return OperationResult::Retry(err);
}
// apply
match terraform_exec(root_dir, vec!["apply", "-no-color", "-auto-approve", "tf_plan"]) {
Ok(out) => OperationResult::Ok(out),
Err(err) => {
error!("error while trying to run terraform apply on rendered templates, retrying...");
// Error while trying to run terraform apply on rendered templates, retrying...
OperationResult::Retry(err)
}
}
@@ -169,11 +152,11 @@ fn terraform_plan_apply(root_dir: &str) -> Result<(), SimpleError> {
match result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)),
}
}
pub fn terraform_init_validate_state_list(root_dir: &str) -> Result<Vec<String>, SimpleError> {
pub fn terraform_init_validate_state_list(root_dir: &str) -> Result<Vec<String>, CommandError> {
// terraform init and validate
if let Err(e) = terraform_init_validate(root_dir) {
return Err(e);
@@ -184,7 +167,7 @@ pub fn terraform_init_validate_state_list(root_dir: &str) -> Result<Vec<String>,
match terraform_exec(root_dir, vec!["state", "list"]) {
Ok(out) => OperationResult::Ok(out),
Err(err) => {
error!("error while trying to run terraform state list, retrying...");
// Error while trying to run terraform state list, retrying...
OperationResult::Retry(err)
}
}
@@ -193,11 +176,11 @@ pub fn terraform_init_validate_state_list(root_dir: &str) -> Result<Vec<String>,
match result {
Ok(output) => Ok(output),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
Err(retry::Error::Internal(e)) => Err(CommandError::new(e, None)),
}
}
pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result<Vec<String>, SimpleError> {
pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result<Vec<String>, CommandError> {
// override if environment variable is set
let tf_plugin_cache_dir_value = match env::var_os(TF_PLUGIN_CACHE_DIR) {
Some(val) => format!("{:?}", val),
@@ -218,11 +201,9 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result<Vec<String>, Si
let result = cmd.exec_with_output(
|line| {
info!("{}", line);
stdout.push(line);
},
|line| {
error!("{}", line);
stderr.push(line);
},
);
@@ -231,18 +212,14 @@ pub fn terraform_exec(root_dir: &str, args: Vec<&str>) -> Result<Vec<String>, Si
match result {
Ok(_) => Ok(stdout),
Err(e) => {
debug!("Terraform endend in error {:?}", e);
let err = SimpleError::new(Other, Some(stdout.join("\n")));
Err(err)
}
Err(_) => Err(CommandError::new(stdout.join("\n"), None)),
}
}
#[cfg(test)]
mod tests {
use crate::cmd::terraform::{manage_common_issues, terraform_init_validate};
use crate::error::{SimpleError, SimpleErrorKind};
use crate::errors::CommandError;
use std::fs;
use tracing::{span, Level};
use tracing_test::traced_test;
@@ -269,14 +246,8 @@ obtain schema: the cached package for registry.terraform.io/hashicorp/time
in the dependency lock file
"#;
let could_not_load_plugin_error = SimpleError {
kind: SimpleErrorKind::Other,
message: Some(could_not_load_plugin.to_string()),
};
assert!(manage_common_issues(
&"/tmp/do_not_exists".to_string(),
&could_not_load_plugin_error
));
let could_not_load_plugin_error = CommandError::new_from_safe_message(could_not_load_plugin.to_string());
assert!(manage_common_issues(&"/tmp/do_not_exists".to_string(), &could_not_load_plugin_error).is_ok());
}
#[test]

View File

@@ -234,7 +234,7 @@ mod tests {
#[test]
fn test_run_version_for_command() {
let ret = run_version_command_for("/bin/ls");
let ret = run_version_command_for("ls");
assert_eq!(ret.is_empty(), false);
assert_eq!(ret.contains("GNU"), true)
}

View File

@@ -4,16 +4,16 @@ use serde_derive::{Deserialize, Serialize};
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub struct SimpleError {
pub struct CommandError {
message: String,
message_safe: String,
message_unsafe: String,
}
impl From<errors::SimpleError> for SimpleError {
fn from(error: errors::SimpleError) -> Self {
SimpleError {
message: error.message,
message_safe: error.message_safe,
impl From<errors::CommandError> for CommandError {
fn from(error: errors::CommandError) -> Self {
CommandError {
message: error.message_safe.unwrap_or("".to_string()),
message_unsafe: error.message_raw,
}
}
}
@@ -21,11 +21,61 @@ impl From<errors::SimpleError> for SimpleError {
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum Tag {
/// Unknown: unknown error.
Unknown,
MissingRequiredEnvVariable,
ClusterHasNoWorkerNodes,
CannotGetWorkspaceDirectory,
UnsupportedInstanceType,
CannotRetrieveClusterConfigFile,
CannotGetClusterNodes,
NotEnoughResourcesToDeployEnvironment,
CannotUninstallHelmChart,
CannotExecuteK8sVersion,
CannotDetermineK8sMasterVersion,
CannotDetermineK8sRequestedUpgradeVersion,
CannotDetermineK8sKubeletWorkerVersion,
CannotDetermineK8sKubeProxyVersion,
CannotExecuteK8sApiCustomMetrics,
K8sPodDisruptionBudgetInInvalidState,
K8sPodsDisruptionBudgetCannotBeRetrieved,
K8sCannotDeletePod,
K8sCannotGetCrashLoopingPods,
K8sCannotGetPods,
K8sUpgradeDeployedVsRequestedVersionsInconsistency,
K8sScaleReplicas,
K8sLoadBalancerConfigurationIssue,
K8sServiceError,
K8sGetLogs,
K8sGetEvents,
K8sDescribe,
K8sHistory,
K8sCannotCreateNamespace,
K8sPodIsNotReady,
K8sNodeIsNotReadyWithTheRequestedVersion,
K8sNodeIsNotReady,
UnsupportedRegion,
UnsupportedZone,
CannotFindRequiredBinary,
SubnetsCountShouldBeEven,
CannotGetOrCreateIamRole,
CannotCopyFilesFromDirectoryToDirectory,
CannotPauseClusterTasksAreRunning,
TerraformCannotRemoveEntryOut,
TerraformNoStateFileExists,
TerraformErrorWhileExecutingPipeline,
TerraformErrorWhileExecutingDestroyPipeline,
HelmChartsSetupError,
HelmChartsDeployError,
HelmChartsUpgradeError,
HelmChartUninstallError,
HelmHistoryError,
CannotGetAnyAvailableVPC,
UnsupportedVersion,
CannotGetSupportedVersions,
CannotGetCluster,
ObjectStorageCannotCreateBucket,
ObjectStorageCannotPutFileIntoBucket,
}
impl From<errors::Tag> for Tag {
@@ -36,6 +86,59 @@ impl From<errors::Tag> for Tag {
errors::Tag::CannotRetrieveClusterConfigFile => Tag::CannotRetrieveClusterConfigFile,
errors::Tag::CannotGetClusterNodes => Tag::CannotGetClusterNodes,
errors::Tag::NotEnoughResourcesToDeployEnvironment => Tag::NotEnoughResourcesToDeployEnvironment,
errors::Tag::MissingRequiredEnvVariable => Tag::MissingRequiredEnvVariable,
errors::Tag::ClusterHasNoWorkerNodes => Tag::ClusterHasNoWorkerNodes,
errors::Tag::CannotGetWorkspaceDirectory => Tag::CannotGetWorkspaceDirectory,
errors::Tag::CannotUninstallHelmChart => Tag::CannotUninstallHelmChart,
errors::Tag::CannotExecuteK8sVersion => Tag::CannotExecuteK8sVersion,
errors::Tag::CannotDetermineK8sMasterVersion => Tag::CannotDetermineK8sMasterVersion,
errors::Tag::CannotDetermineK8sRequestedUpgradeVersion => Tag::CannotDetermineK8sRequestedUpgradeVersion,
errors::Tag::CannotDetermineK8sKubeletWorkerVersion => Tag::CannotDetermineK8sKubeletWorkerVersion,
errors::Tag::CannotDetermineK8sKubeProxyVersion => Tag::CannotDetermineK8sKubeProxyVersion,
errors::Tag::CannotExecuteK8sApiCustomMetrics => Tag::CannotExecuteK8sApiCustomMetrics,
errors::Tag::K8sPodDisruptionBudgetInInvalidState => Tag::K8sPodDisruptionBudgetInInvalidState,
errors::Tag::K8sPodsDisruptionBudgetCannotBeRetrieved => Tag::K8sPodsDisruptionBudgetCannotBeRetrieved,
errors::Tag::K8sCannotDeletePod => Tag::K8sCannotDeletePod,
errors::Tag::K8sCannotGetCrashLoopingPods => Tag::K8sCannotGetCrashLoopingPods,
errors::Tag::K8sCannotGetPods => Tag::K8sCannotGetPods,
errors::Tag::K8sUpgradeDeployedVsRequestedVersionsInconsistency => {
Tag::K8sUpgradeDeployedVsRequestedVersionsInconsistency
}
errors::Tag::K8sScaleReplicas => Tag::K8sScaleReplicas,
errors::Tag::K8sLoadBalancerConfigurationIssue => Tag::K8sLoadBalancerConfigurationIssue,
errors::Tag::K8sServiceError => Tag::K8sServiceError,
errors::Tag::K8sGetLogs => Tag::K8sGetLogs,
errors::Tag::K8sGetEvents => Tag::K8sGetEvents,
errors::Tag::K8sDescribe => Tag::K8sDescribe,
errors::Tag::K8sHistory => Tag::K8sHistory,
errors::Tag::K8sCannotCreateNamespace => Tag::K8sCannotCreateNamespace,
errors::Tag::K8sPodIsNotReady => Tag::K8sPodIsNotReady,
errors::Tag::CannotFindRequiredBinary => Tag::CannotFindRequiredBinary,
errors::Tag::SubnetsCountShouldBeEven => Tag::SubnetsCountShouldBeEven,
errors::Tag::CannotGetOrCreateIamRole => Tag::CannotGetOrCreateIamRole,
errors::Tag::CannotCopyFilesFromDirectoryToDirectory => Tag::CannotCopyFilesFromDirectoryToDirectory,
errors::Tag::CannotPauseClusterTasksAreRunning => Tag::CannotPauseClusterTasksAreRunning,
errors::Tag::TerraformCannotRemoveEntryOut => Tag::TerraformCannotRemoveEntryOut,
errors::Tag::TerraformNoStateFileExists => Tag::TerraformNoStateFileExists,
errors::Tag::TerraformErrorWhileExecutingPipeline => Tag::TerraformErrorWhileExecutingPipeline,
errors::Tag::TerraformErrorWhileExecutingDestroyPipeline => {
Tag::TerraformErrorWhileExecutingDestroyPipeline
}
errors::Tag::HelmChartsSetupError => Tag::HelmChartsSetupError,
errors::Tag::HelmChartsDeployError => Tag::HelmChartsDeployError,
errors::Tag::HelmChartsUpgradeError => Tag::HelmChartsUpgradeError,
errors::Tag::HelmChartUninstallError => Tag::HelmChartUninstallError,
errors::Tag::HelmHistoryError => Tag::HelmHistoryError,
errors::Tag::CannotGetAnyAvailableVPC => Tag::CannotGetAnyAvailableVPC,
errors::Tag::UnsupportedVersion => Tag::UnsupportedVersion,
errors::Tag::CannotGetSupportedVersions => Tag::CannotGetSupportedVersions,
errors::Tag::CannotGetCluster => Tag::CannotGetCluster,
errors::Tag::ObjectStorageCannotCreateBucket => Tag::ObjectStorageCannotCreateBucket,
errors::Tag::ObjectStorageCannotPutFileIntoBucket => Tag::ObjectStorageCannotPutFileIntoBucket,
errors::Tag::UnsupportedRegion => Tag::UnsupportedRegion,
errors::Tag::UnsupportedZone => Tag::UnsupportedZone,
errors::Tag::K8sNodeIsNotReadyWithTheRequestedVersion => Tag::K8sNodeIsNotReadyWithTheRequestedVersion,
errors::Tag::K8sNodeIsNotReady => Tag::K8sNodeIsNotReady,
}
}
}
@@ -47,8 +150,7 @@ pub struct EngineError {
event_details: EventDetails,
qovery_log_message: String,
user_log_message: String,
raw_message: Option<String>,
raw_message_safe: Option<String>,
message: Option<CommandError>,
link: Option<String>,
hint_message: Option<String>,
}
@@ -60,8 +162,10 @@ impl From<errors::EngineError> for EngineError {
event_details: EventDetails::from(error.event_details),
qovery_log_message: error.qovery_log_message,
user_log_message: error.user_log_message,
raw_message: error.raw_message,
raw_message_safe: error.raw_message_safe,
message: match error.message {
Some(msg) => Some(CommandError::from(msg)),
None => None,
},
link: error.link.map(|url| url.to_string()),
hint_message: error.hint_message,
}

File diff suppressed because it is too large Load Diff

View File

@@ -117,6 +117,7 @@ impl From<events::Stage> for Stage {
pub enum GeneralStep {
RetrieveClusterConfig,
RetrieveClusterResources,
ValidateSystemRequirements,
}
impl From<events::GeneralStep> for GeneralStep {
@@ -124,6 +125,7 @@ impl From<events::GeneralStep> for GeneralStep {
match step {
events::GeneralStep::RetrieveClusterConfig => GeneralStep::RetrieveClusterConfig,
events::GeneralStep::RetrieveClusterResources => GeneralStep::RetrieveClusterResources,
events::GeneralStep::ValidateSystemRequirements => GeneralStep::ValidateSystemRequirements,
}
}
}
@@ -135,6 +137,7 @@ pub enum InfrastructureStep {
Create,
Pause,
Resume,
Downgrade,
Upgrade,
Delete,
}
@@ -148,6 +151,7 @@ impl From<events::InfrastructureStep> for InfrastructureStep {
events::InfrastructureStep::Upgrade => InfrastructureStep::Upgrade,
events::InfrastructureStep::Delete => InfrastructureStep::Delete,
events::InfrastructureStep::Resume => InfrastructureStep::Resume,
events::InfrastructureStep::Downgrade => InfrastructureStep::Downgrade,
}
}
}

View File

@@ -49,13 +49,13 @@ impl EngineEvent {
pub fn get_message(&self) -> String {
match self {
EngineEvent::Error(engine_error) => engine_error.message(),
EngineEvent::Waiting(_details, message) => message.get_message(),
EngineEvent::Deploying(_details, message) => message.get_message(),
EngineEvent::Pausing(_details, message) => message.get_message(),
EngineEvent::Deleting(_details, message) => message.get_message(),
EngineEvent::Deployed(_details, message) => message.get_message(),
EngineEvent::Paused(_details, message) => message.get_message(),
EngineEvent::Deleted(_details, message) => message.get_message(),
EngineEvent::Waiting(_details, message) => message.message(),
EngineEvent::Deploying(_details, message) => message.message(),
EngineEvent::Pausing(_details, message) => message.message(),
EngineEvent::Deleting(_details, message) => message.message(),
EngineEvent::Deployed(_details, message) => message.message(),
EngineEvent::Paused(_details, message) => message.message(),
EngineEvent::Deleted(_details, message) => message.message(),
}
}
}
@@ -80,10 +80,22 @@ impl EventMessage {
EventMessage { raw, safe }
}
/// Returns message for event message, safe message if exists, otherwise raw.
pub fn get_message(&self) -> String {
/// Creates e new EventMessage from safe message.
///
/// Arguments
///
/// * `safe`: Event safe message string (from which all unsafe text such as passwords and tokens has been removed).
pub fn new_from_safe(safe: String) -> Self {
EventMessage {
raw: safe.to_string(),
safe: Some(safe),
}
}
/// Returns message for event message.
pub fn message(&self) -> String {
if let Some(msg) = &self.safe {
return msg.clone();
return format!("{} {}", msg.clone(), self.raw.clone());
}
self.raw.clone()
@@ -138,6 +150,8 @@ impl Display for Stage {
#[derive(Debug, Clone)]
/// GeneralStep: represents an engine general step usually shared across all engine stages
pub enum GeneralStep {
/// ValidateSystemRequirements: validating system requirements
ValidateSystemRequirements,
/// RetrieveClusterConfig: retrieving cluster configuration
RetrieveClusterConfig,
/// RetrieveClusterResources: retrieving cluster resources
@@ -152,6 +166,7 @@ impl Display for GeneralStep {
match &self {
GeneralStep::RetrieveClusterConfig => "retrieve-cluster-config",
GeneralStep::RetrieveClusterResources => "retrieve-cluster-resources",
GeneralStep::ValidateSystemRequirements => "validate-system-requirements",
}
)
}
@@ -170,6 +185,8 @@ pub enum InfrastructureStep {
Resume,
/// Upgrade: upgrade a cluster.
Upgrade,
/// Downgrade: upgrade a cluster.
Downgrade,
/// Delete: delete a cluster.
Delete,
}
@@ -184,6 +201,7 @@ impl Display for InfrastructureStep {
InfrastructureStep::Create => "create",
InfrastructureStep::Pause => "pause",
InfrastructureStep::Upgrade => "upgrade",
InfrastructureStep::Downgrade => "downgrade",
InfrastructureStep::Delete => "delete",
InfrastructureStep::Resume => "resume",
},
@@ -345,6 +363,13 @@ impl EventDetails {
}
}
/// TODO(benjaminch): remove this dirty hack
pub fn clone_changing_stage(event_details: EventDetails, stage: Stage) -> Self {
let mut event_details = event_details.clone();
event_details.stage = stage;
event_details
}
/// Returns event's provider name.
pub fn provider_kind(&self) -> Option<Kind> {
self.provider_kind.clone()
@@ -390,7 +415,7 @@ mod tests {
// setup:
let test_cases: Vec<(Option<String>, String, String)> = vec![
(None, "raw".to_string(), "raw".to_string()),
(Some("safe".to_string()), "raw".to_string(), "safe".to_string()),
(Some("safe".to_string()), "raw".to_string(), "safe raw".to_string()),
];
for tc in test_cases {
@@ -399,7 +424,7 @@ mod tests {
let event_message = EventMessage::new(raw_message, safe_message);
// validate:
assert_eq!(expected, event_message.get_message());
assert_eq!(expected, event_message.message());
}
}

View File

@@ -9,7 +9,7 @@ pub enum LogLevel {
Error,
}
pub trait Logger: Send {
pub trait Logger: Send + Sync {
fn log(&self, log_level: LogLevel, event: EngineEvent);
fn clone_dyn(&self) -> Box<dyn Logger>;
}
@@ -81,6 +81,7 @@ mod tests {
use super::*;
use crate::cloud_provider::scaleway::application::ScwRegion;
use crate::cloud_provider::Kind;
use crate::errors;
use crate::errors::EngineError;
use crate::events::{EnvironmentStep, EventDetails, EventMessage, InfrastructureStep, Stage, Transmitter};
use crate::models::QoveryIdentifier;
@@ -126,8 +127,10 @@ mod tests {
),
qovery_message.to_string(),
user_message.to_string(),
Some(raw_message.to_string()),
Some(raw_message_safe.to_string()),
Some(errors::CommandError::new(
raw_message.to_string(),
Some(raw_message_safe.to_string()),
)),
Some(link.clone()),
Some(hint.to_string()),
)),

View File

@@ -39,6 +39,10 @@ impl QoveryIdentifier {
}
}
/// Builds a `QoveryIdentifier` backed by a freshly generated random UUID (version 4).
pub fn new_random() -> Self {
    let random_id = uuid::Uuid::new_v4();
    Self::new(random_id.to_string())
}
fn extract_short(raw: &str) -> String {
let max_execution_id_chars: usize = 7;
match raw.char_indices().nth(max_execution_id_chars) {

View File

@@ -5,22 +5,20 @@ use std::io::Write;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use crate::errors::CommandError;
use tera::Error as TeraError;
use tera::{Context, Tera};
use walkdir::WalkDir;
use crate::error::{SimpleError, SimpleErrorKind};
pub fn generate_and_copy_all_files_into_dir<S, P>(from_dir: S, to_dir: P, context: &Context) -> Result<(), SimpleError>
pub fn generate_and_copy_all_files_into_dir<S, P>(from_dir: S, to_dir: P, context: Context) -> Result<(), CommandError>
where
S: AsRef<Path>,
P: AsRef<Path>,
{
// generate j2 templates
let rendered_templates = match generate_j2_template_files(from_dir.as_ref(), context) {
let rendered_templates = match generate_j2_template_files(from_dir.as_ref(), context.clone()) {
Ok(rt) => rt,
Err(e) => {
error!("{:?}", &e);
let error_msg = match e.kind {
tera::ErrorKind::TemplateNotFound(x) => format!("template not found: {}", x),
tera::ErrorKind::Msg(x) => format!("tera error: {}", x),
@@ -46,9 +44,7 @@ where
tera::ErrorKind::Utf8Conversion { .. } => "utf-8 conversion issue".to_string(),
};
error!("{}", context.clone().into_json());
error!("{}", error_msg.as_str());
return Err(SimpleError::new(SimpleErrorKind::Other, Some(error_msg)));
return Err(CommandError::new(context.into_json().to_string(), Some(error_msg)));
}
};
@@ -61,18 +57,18 @@ where
Ok(())
}
pub fn copy_non_template_files<S, P>(from: S, to: P) -> Result<(), SimpleError>
pub fn copy_non_template_files<S, P>(from: S, to: P) -> Result<(), CommandError>
where
S: AsRef<Path>,
P: AsRef<Path>,
{
match crate::fs::copy_files(from.as_ref(), to.as_ref(), true) {
Err(err) => Err(SimpleError::from(err)),
Err(err) => Err(CommandError::new(err.to_string(), None)),
Ok(x) => Ok(x),
}
}
pub fn generate_j2_template_files<P>(root_dir: P, context: &Context) -> Result<Vec<RenderedTemplate>, TeraError>
pub fn generate_j2_template_files<P>(root_dir: P, context: Context) -> Result<Vec<RenderedTemplate>, TeraError>
where
P: AsRef<Path>,
{
@@ -101,7 +97,7 @@ where
let j2_root_path: String = j2_path_split.as_slice()[..j2_path_split.len() - 1].join("/");
let file_name = j2_file_name.replace(".j2", "");
let content = tera.render(&j2_path[1..], context)?;
let content = tera.render(&j2_path[1..], &context)?;
results.push(RenderedTemplate::new(j2_root_path, file_name, content));
}
@@ -109,7 +105,7 @@ where
Ok(results)
}
pub fn write_rendered_templates(rendered_templates: &[RenderedTemplate], into: &Path) -> Result<(), SimpleError> {
pub fn write_rendered_templates(rendered_templates: &[RenderedTemplate], into: &Path) -> Result<(), CommandError> {
for rt in rendered_templates {
let dest = format!("{}/{}", into.to_str().unwrap(), rt.path_and_file_name());
@@ -124,10 +120,11 @@ pub fn write_rendered_templates(rendered_templates: &[RenderedTemplate], into: &
let _ = fs::remove_file(dest.as_str());
// create an empty file
let mut f = fs::File::create(&dest)?;
let mut f = fs::File::create(&dest).map_err(|e| CommandError::new(e.to_string(), None))?;
// write rendered template into the new file
f.write_all(rt.content.as_bytes())?;
f.write_all(rt.content.as_bytes())
.map_err(|e| CommandError::new(e.to_string(), None))?;
// perform specific action based on the extension
let extension = Path::new(&dest).extension().and_then(OsStr::to_str);

View File

@@ -288,19 +288,19 @@ impl<'a> Transaction<'a> {
Step::CreateKubernetes(kubernetes) => {
// revert kubernetes creation
if let Err(err) = kubernetes.on_create_error() {
return Err(RollbackError::CommitError(err));
return Err(RollbackError::CommitError(err.to_legacy_engine_error()));
};
}
Step::DeleteKubernetes(kubernetes) => {
// revert kubernetes deletion
if let Err(err) = kubernetes.on_delete_error() {
return Err(RollbackError::CommitError(err));
return Err(RollbackError::CommitError(err.to_legacy_engine_error()));
};
}
Step::PauseKubernetes(kubernetes) => {
// revert pause
if let Err(err) = kubernetes.on_pause_error() {
return Err(RollbackError::CommitError(err));
return Err(RollbackError::CommitError(err.to_legacy_engine_error()));
};
}
Step::BuildEnvironment(_environment_action, _option) => {
@@ -371,7 +371,7 @@ impl<'a> Transaction<'a> {
let _ = match action {
Ok(_) => {}
Err(err) => return Err(RollbackError::CommitError(err)),
Err(err) => return Err(RollbackError::CommitError(err.to_legacy_engine_error())),
};
Ok(())
@@ -389,7 +389,7 @@ impl<'a> Transaction<'a> {
let _ = match action {
Ok(_) => {}
Err(err) => return Err(RollbackError::CommitError(err)),
Err(err) => return Err(RollbackError::CommitError(err.to_legacy_engine_error())),
};
Err(RollbackError::NoFailoverEnvironment)
@@ -407,7 +407,11 @@ impl<'a> Transaction<'a> {
match step {
Step::CreateKubernetes(kubernetes) => {
// create kubernetes
match self.commit_infrastructure(*kubernetes, Action::Create, kubernetes.on_create()) {
match self.commit_infrastructure(
*kubernetes,
Action::Create,
kubernetes.on_create().map_err(|e| e.to_legacy_engine_error()),
) {
TransactionResult::Ok => {}
err => {
error!("Error while creating infrastructure: {:?}", err);
@@ -417,7 +421,11 @@ impl<'a> Transaction<'a> {
}
Step::DeleteKubernetes(kubernetes) => {
// delete kubernetes
match self.commit_infrastructure(*kubernetes, Action::Delete, kubernetes.on_delete()) {
match self.commit_infrastructure(
*kubernetes,
Action::Delete,
kubernetes.on_delete().map_err(|e| e.to_legacy_engine_error()),
) {
TransactionResult::Ok => {}
err => {
error!("Error while deleting infrastructure: {:?}", err);
@@ -427,7 +435,11 @@ impl<'a> Transaction<'a> {
}
Step::PauseKubernetes(kubernetes) => {
// pause kubernetes
match self.commit_infrastructure(*kubernetes, Action::Pause, kubernetes.on_pause()) {
match self.commit_infrastructure(
*kubernetes,
Action::Pause,
kubernetes.on_pause().map_err(|e| e.to_legacy_engine_error()),
) {
TransactionResult::Ok => {}
err => {
error!("Error while pausing infrastructure: {:?}", err);
@@ -496,7 +508,11 @@ impl<'a> Transaction<'a> {
*kubernetes,
*environment_action,
&applications_by_environment,
|qe_env| kubernetes.deploy_environment(qe_env),
|qe_env| {
kubernetes
.deploy_environment(qe_env)
.map_err(|e| e.to_legacy_engine_error())
},
) {
TransactionResult::Ok => {}
err => {
@@ -511,7 +527,11 @@ impl<'a> Transaction<'a> {
*kubernetes,
*environment_action,
&applications_by_environment,
|qe_env| kubernetes.pause_environment(qe_env),
|qe_env| {
kubernetes
.pause_environment(qe_env)
.map_err(|e| e.to_legacy_engine_error())
},
) {
TransactionResult::Ok => {}
err => {
@@ -526,7 +546,11 @@ impl<'a> Transaction<'a> {
*kubernetes,
*environment_action,
&applications_by_environment,
|qe_env| kubernetes.delete_environment(qe_env),
|qe_env| {
kubernetes
.delete_environment(qe_env)
.map_err(|e| e.to_legacy_engine_error())
},
) {
TransactionResult::Ok => {}
err => {

View File

@@ -35,7 +35,7 @@ use qovery_engine::cloud_provider::{CloudProvider, Kind};
use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas;
use qovery_engine::cmd::structs::SVCItem;
use qovery_engine::engine::Engine;
use qovery_engine::error::{SimpleError, SimpleErrorKind};
use qovery_engine::errors::CommandError;
use qovery_engine::logger::Logger;
use qovery_engine::models::DatabaseMode::CONTAINER;
use qovery_engine::transaction::DeploymentOption;
@@ -1539,7 +1539,7 @@ pub fn cluster_test(
test_name.to_string()
}
pub fn metrics_server_test<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<(), SimpleError>
pub fn metrics_server_test<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<(), CommandError>
where
P: AsRef<Path>,
{
@@ -1556,10 +1556,9 @@ where
.expect("No hpa condition.")
.contains("ValidMetricFound")
{
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some("Metrics server doesn't work".to_string()),
});
return Err(CommandError::new_from_safe_message(
"Metrics server doesn't work".to_string(),
));
}
}
Ok(())

View File

@@ -20,7 +20,7 @@ use retry::delay::Fibonacci;
use retry::OperationResult;
use std::env;
use std::fs;
use tracing::{error, info, warn};
use tracing::{info, warn};
use crate::scaleway::{
SCW_MANAGED_DATABASE_DISK_TYPE, SCW_MANAGED_DATABASE_INSTANCE_TYPE, SCW_SELF_HOSTED_DATABASE_DISK_TYPE,
@@ -35,8 +35,8 @@ use qovery_engine::constants::{
AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, DIGITAL_OCEAN_SPACES_ACCESS_ID, DIGITAL_OCEAN_SPACES_SECRET_ID,
DIGITAL_OCEAN_TOKEN, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY,
};
use qovery_engine::error::{SimpleError, SimpleErrorKind};
use qovery_engine::models::{Context, Database, DatabaseKind, DatabaseMode, Environment, Features, Metadata};
use retry::Error::Operation;
use serde::{Deserialize, Serialize};
extern crate time;
@@ -48,7 +48,7 @@ use qovery_engine::cloud_provider::digitalocean::application::DoRegion;
use qovery_engine::cmd::kubectl::{kubectl_get_pvc, kubectl_get_svc};
use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod, PVC, SVC};
use qovery_engine::cmd::utilities::QoveryCommand;
use qovery_engine::error::SimpleErrorKind::Other;
use qovery_engine::errors::CommandError;
use qovery_engine::logger::{Logger, StdIoLogger};
use qovery_engine::models::DatabaseMode::MANAGED;
use qovery_engine::object_storage::spaces::{BucketDeleteStrategy, Spaces};
@@ -480,7 +480,7 @@ pub fn kubernetes_config_path(
provider_kind: Kind,
workspace_directory: &str,
secrets: FuncTestsSecrets,
) -> Result<String, SimpleError> {
) -> Result<String, CommandError> {
let kubernetes_config_bucket_name = format!("qovery-kubeconfigs-{}", context.cluster_id());
let kubernetes_config_object_key = format!("{}.yaml", context.cluster_id());
let kubernetes_config_file_path = format!("{}/kubernetes_config_{}", workspace_directory, context.cluster_id());
@@ -504,7 +504,7 @@ fn get_kubernetes_config_file<P>(
kubernetes_config_object_key: String,
file_path: P,
secrets: FuncTestsSecrets,
) -> Result<fs::File, SimpleError>
) -> Result<fs::File, CommandError>
where
P: AsRef<Path>,
{
@@ -564,30 +564,28 @@ where
match file.read_to_string(&mut content) {
Ok(_) => Ok(content),
Err(e) => {
let message = format!("error while trying to read file, error: {}", e);
error!("{}", message);
Err(SimpleError::new(SimpleErrorKind::Other, Some(message)))
let message_safe = "Error while trying to read file";
Err(CommandError::new(
format!("{}, error: {}", message_safe.to_string(), e),
Some(message_safe.to_string()),
))
}
}
}
Err(e) => {
let message = format!(
"error while trying to get kubeconfig from spaces, error: {:?}",
e.message,
);
error!("{}", message);
Err(SimpleError::new(SimpleErrorKind::Other, e.message))
let message_safe = "Error while trying to get kubeconfig from spaces";
Err(CommandError::new(
format!(
"{}, error: {}",
message_safe.to_string(),
e.message.unwrap_or("no error message".to_string())
),
Some(message_safe.to_string()),
))
}
}
}
Err(_) => {
let message = format!("`{}` is not a valid region", region_raw);
error!("{}", message);
Err(SimpleError::new(SimpleErrorKind::Other, Some(message)))
}
Err(e) => Err(e),
}
}
Kind::Scw => {
@@ -618,19 +616,19 @@ where
));
if let Err(e) = clusters_res {
let message = format!("error while trying to get clusters, error: {}", e.to_string());
error!("{}", message);
return OperationResult::Retry(SimpleError::new(SimpleErrorKind::Other, Some(message.as_str())));
let message_safe = "Error while trying to get clusters";
return OperationResult::Retry(CommandError::new(
format!("{}, error: {}", message_safe.to_string(), e.to_string()),
Some(message_safe.to_string()),
));
}
let clusters = clusters_res.unwrap();
if clusters.clusters.is_none() {
let message = "error while trying to get clusters, error: no clusters found";
error!("{}", message);
return OperationResult::Retry(SimpleError::new(SimpleErrorKind::Other, Some(message)));
return OperationResult::Retry(CommandError::new_from_safe_message(
"Error while trying to get clusters".to_string(),
));
}
let clusters = clusters.clusters.unwrap();
@@ -661,13 +659,10 @@ where
);
}
Err(e) => {
let message =
format!("error while trying to get clusters, error: {}", e.to_string());
error!("{}", message);
return OperationResult::Retry(SimpleError::new(
SimpleErrorKind::Other,
Some(message.as_str()),
let message_safe = "Error while trying to get clusters";
return OperationResult::Retry(CommandError::new(
format!("{}, error: {}", message_safe.to_string(), e.to_string()),
Some(message_safe.to_string()),
));
}
};
@@ -676,7 +671,9 @@ where
}
}
Err(SimpleError::new(SimpleErrorKind::Other, Some("Test cluster not found")))
Err(CommandError::new_from_safe_message(
"Test cluster not found".to_string(),
))
}
};
@@ -688,11 +685,9 @@ where
let file_content = match file_content_result {
Ok(file_content) => file_content,
Err(_) => {
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some("file content is empty (retry failed multiple times) - which is not the expected content - what's wrong?"),
));
Err(Operation { error, .. }) => return Err(error),
Err(retry::Error::Internal(msg)) => {
return Err(CommandError::new_from_safe_message(msg));
}
};
@@ -700,12 +695,25 @@ where
.create(true)
.write(true)
.truncate(true)
.open(file_path.as_ref())?;
let _ = kubernetes_config_file.write_all(file_content.as_bytes())?;
.open(file_path.as_ref())
.map_err(|e| {
let message_safe = format!("Error opening kubeconfig file.");
CommandError::new(
format!("{}, error: {}", message_safe.to_string(), e.to_string()),
Some(message_safe.to_string()),
)
})?;
let _ = kubernetes_config_file
.write_all(file_content.as_bytes())
.map_err(|_| CommandError::new_from_safe_message("Error while trying to write into file.".to_string()))?;
// removes warning kubeconfig is (world/group) readable
let mut perms = fs::metadata(file_path.as_ref())?.permissions();
let mut perms = fs::metadata(file_path.as_ref())
.map_err(|_| CommandError::new_from_safe_message("Error while trying to get file metadata.".to_string()))?
.permissions();
perms.set_readonly(false);
fs::set_permissions(file_path.as_ref(), perms)?;
fs::set_permissions(file_path.as_ref(), perms)
.map_err(|_| CommandError::new_from_safe_message("Error while trying to set file permission.".to_string()))?;
Ok(kubernetes_config_file)
}
@@ -765,7 +773,7 @@ fn aws_s3_get_object(
secret_access_key: &str,
bucket_name: &str,
object_key: &str,
) -> Result<String, SimpleError> {
) -> Result<String, CommandError> {
let local_path = format!("/tmp/{}", object_key); // FIXME: change hardcoded /tmp/
// gets an aws s3 object using aws-cli
@@ -782,8 +790,9 @@ fn aws_s3_get_object(
);
cmd.exec()
.map_err(|err| SimpleError::new(Other, Some(format!("{}", err))))?;
let s = fs::read_to_string(&local_path)?;
.map_err(|err| CommandError::new_from_safe_message(format!("{:?}", err)))?;
let s = fs::read_to_string(&local_path)
.map_err(|_| CommandError::new_from_safe_message("Error while trying to read file to string.".to_string()))?;
Ok(s)
}
@@ -829,7 +838,7 @@ pub fn get_pods(
environment_check: Environment,
pod_to_check: &str,
secrets: FuncTestsSecrets,
) -> Result<KubernetesList<KubernetesPod>, SimpleError> {
) -> Result<KubernetesList<KubernetesPod>, CommandError> {
let namespace_name = format!(
"{}-{}",
&environment_check.project_id.clone(),
@@ -901,7 +910,7 @@ pub fn get_pvc(
provider_kind: Kind,
environment_check: Environment,
secrets: FuncTestsSecrets,
) -> Result<PVC, SimpleError> {
) -> Result<PVC, CommandError> {
let namespace_name = format!(
"{}-{}",
&environment_check.project_id.clone(),
@@ -930,7 +939,7 @@ pub fn get_svc(
provider_kind: Kind,
environment_check: Environment,
secrets: FuncTestsSecrets,
) -> Result<SVC, SimpleError> {
) -> Result<SVC, CommandError> {
let namespace_name = format!(
"{}-{}",
&environment_check.project_id.clone(),

View File

@@ -5,11 +5,10 @@ use self::test_utilities::utilities::{
engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, logger, FuncTestsSecrets,
};
use ::function_name::named;
use qovery_engine::build_platform::{BuildPlatform, BuildResult, CacheResult};
use qovery_engine::build_platform::{BuildPlatform, CacheResult};
use qovery_engine::cloud_provider::Kind;
use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs;
use qovery_engine::container_registry::{ContainerRegistry, PullResult, PushResult};
use qovery_engine::error::EngineError;
use qovery_engine::container_registry::{ContainerRegistry, PullResult};
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType};
use qovery_engine::transaction::TransactionResult;
use std::collections::BTreeMap;
@@ -89,7 +88,6 @@ fn test_build_cache() {
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let logger = logger();
let secrets = FuncTestsSecrets::new();
let context = context(
secrets

View File

@@ -92,7 +92,6 @@ fn test_build_cache() {
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let logger = logger();
let secrets = FuncTestsSecrets::new();
let context = context(
secrets

View File

@@ -95,7 +95,6 @@ fn test_build_cache() {
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let logger = logger();
let secrets = FuncTestsSecrets::new();
let context = context(
secrets