wip: make the way to get the kube config file generic

This commit is contained in:
Romaric Philogene
2020-12-24 17:37:55 +01:00
parent 5dbe0e1ac4
commit a42d40da19
18 changed files with 388 additions and 331 deletions

1
Cargo.lock generated
View File

@@ -2746,6 +2746,7 @@ dependencies = [
"qovery-engine",
"rand 0.7.3",
"reqwest 0.10.8",
"retry",
"serde",
"serde_derive",
"serde_json",

View File

@@ -21,12 +21,11 @@ use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCa
use crate::fs::workspace_directory;
use crate::models::{
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel,
ProgressScope,
ProgressScope, StringPath,
};
use crate::object_storage::s3::S3;
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
use crate::unit_conversion::{cpu_string_to_float, ki_to_mi};
use std::io::Write;
pub mod node;
@@ -429,30 +428,11 @@ impl<'a> Kubernetes for EKS<'a> {
Ok(())
}
fn config_file(&self) -> Result<(String, File), EngineError> {
let object_key = format!("qovery-kubeconfigs-{}/{}.yaml", self.id(), self.id());
let file_content = self.s3.get(object_key)?;
let workspace_directory = workspace_directory(
self.context().workspace_root_dir(),
self.context().execution_id(),
format!("kubeconfigs/{}", self.name()),
);
let config_file_path = format!("{}/kubernetes_config_{}", workspace_directory, self.id());
// write file_content into a file
let mut file = match File::create(config_file_path.as_str()) {
Ok(file) => file,
Err(err) => {
let error = format!("{:?}", err);
return Err(self.engine_error(EngineErrorCause::Internal, error));
}
};
let _ = file.write_all(file_content.as_bytes());
Ok((config_file_path, file))
fn config_file(&self) -> Result<(StringPath, File), EngineError> {
self.s3.get(
format!("qovery-kubeconfigs-{}", self.id()),
format!("{}.yaml", self.id()),
)
}
fn config_file_path(&self) -> Result<String, EngineError> {
@@ -460,47 +440,6 @@ impl<'a> Kubernetes for EKS<'a> {
Ok(path)
}
fn resources(&self, _environment: &Environment) -> Result<Resources, EngineError> {
let kubernetes_config_file_path = self.config_file_path()?;
let nodes = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
cmd::kubectl::kubectl_exec_get_node(
kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
),
)?;
let mut resources = Resources {
free_cpu: 0.0,
max_cpu: 0.0,
free_ram_in_mib: 0,
max_ram_in_mib: 0,
free_pods: 0,
max_pods: 0,
running_nodes: 0,
};
for node in nodes.items {
resources.free_cpu += cpu_string_to_float(node.status.allocatable.cpu);
resources.max_cpu += cpu_string_to_float(node.status.capacity.cpu);
resources.free_ram_in_mib += ki_to_mi(node.status.allocatable.memory);
resources.max_ram_in_mib += ki_to_mi(node.status.capacity.memory);
resources.free_pods = match node.status.allocatable.pods.parse::<u16>() {
Ok(v) => v,
_ => 0,
};
resources.max_pods = match node.status.capacity.pods.parse::<u16>() {
Ok(v) => v,
_ => 0,
};
resources.running_nodes += 1;
}
Ok(resources)
}
fn on_create(&self) -> Result<(), EngineError> {
info!("EKS.on_create() called for {}", self.name());

View File

@@ -183,13 +183,7 @@ impl Create for Application {
),
)?;
let kubeconfig_path = common::kubernetes_config_path(
workspace_dir.as_str(),
kubernetes.id(),
kubernetes.region(),
digitalocean.spaces_secret_key.as_str(),
digitalocean.spaces_access_id.as_str(),
);
let kubeconfig_path = kubernetes.config_file_path();
// define labels to add to namespace
let namespace_labels = match self.context.resource_expiration_in_seconds() {

View File

@@ -7,51 +7,12 @@ use std::os::unix::fs::PermissionsExt;
use reqwest::StatusCode;
use crate::cloud_provider::digitalocean::models::cluster::Clusters;
use crate::cloud_provider::kubernetes::Kubernetes;
use crate::container_registry::docr::get_header_with_bearer;
use crate::error::{SimpleError, SimpleErrorKind};
use crate::object_storage::do_space::download_space_object;
use crate::runtime;
pub const DO_CLUSTER_API_PATH: &str = "https://api.digitalocean.com/v2/kubernetes/clusters";
pub fn kubernetes_config_path(
workspace_directory: &str,
kubernetes_cluster_id: &str,
region: &str,
spaces_secret_key: &str,
spaces_access_id: &str,
) -> Result<String, SimpleError> {
let kubernetes_config_bucket_name = format!("qovery-kubeconfigs-{}", kubernetes_cluster_id);
let kubernetes_config_object_key = format!("{}.yaml", kubernetes_cluster_id);
let kubernetes_config_file_path = format!(
"{}/kubernetes_config_{}",
workspace_directory, kubernetes_cluster_id
);
// download kubeconfig file
let _ = runtime::async_run(download_space_object(
spaces_access_id,
spaces_secret_key,
kubernetes_config_bucket_name.as_str(),
kubernetes_config_object_key.as_str(),
region,
kubernetes_config_file_path.as_str().clone(),
));
// removes warning kubeconfig is (world/group) readable
let file = File::open(kubernetes_config_file_path.clone().as_str())?;
let metadata = file.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o400);
fs::set_permissions(kubernetes_config_file_path.clone().as_str(), permissions)?;
Ok(kubernetes_config_file_path.clone())
}
// retrieve the digital ocean uuid of the kube cluster from our cluster name
// each (terraform) apply may change the cluster uuid, so We need to retrieve it from the Digital Ocean API
pub fn get_uuid_of_cluster_from_name(

View File

@@ -16,8 +16,10 @@ use crate::error::{cast_simple_error_to_engine_error, EngineError};
use crate::fs::workspace_directory;
use crate::models::{
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel,
ProgressScope,
ProgressScope, StringPath,
};
use crate::object_storage::spaces::Spaces;
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
pub mod cidr;
@@ -50,6 +52,7 @@ pub struct DOKS<'a> {
cloud_provider: &'a DO,
nodes: Vec<Node>,
dns_provider: &'a dyn DnsProvider,
spaces: Spaces,
template_directory: String,
options: Options,
listeners: Listeners,
@@ -69,6 +72,15 @@ impl<'a> DOKS<'a> {
) -> Self {
let template_directory = format!("{}/digitalocean/bootstrap", context.lib_root_dir());
let spaces = Spaces::new(
context.clone(),
"spaces-temp-id".to_string(),
"my-spaces-object-storage".to_string(),
cloud_provider.spaces_access_id.clone(),
cloud_provider.spaces_secret_key.clone(),
"".to_string(),
);
DOKS {
context,
id: id.to_string(),
@@ -77,6 +89,7 @@ impl<'a> DOKS<'a> {
region: region.to_string(),
cloud_provider,
dns_provider,
spaces,
options,
nodes,
template_directory,
@@ -84,10 +97,6 @@ impl<'a> DOKS<'a> {
}
}
/// Strips every whitespace character from `s`, mutating it in place.
fn remove_whitespace(s: &mut String) {
    // Rebuild the string keeping only non-whitespace characters,
    // then swap the cleaned copy back into place.
    let cleaned: String = s.chars().filter(|c| !c.is_whitespace()).collect();
    *s = cleaned;
}
// create a context to render tf files (terraform) contained in lib/digitalocean/
fn tera_context(&self) -> TeraContext {
let mut context = TeraContext::new();
@@ -108,10 +117,8 @@ impl<'a> DOKS<'a> {
context.insert("oks_master_size", "s-4vcpu-8gb");
// Network
let vpc_name = &self.options.vpc_name;
context.insert("vpc_name", vpc_name);
let vpc_cidr_block = self.options.vpc_cidr_block.clone();
context.insert("vpc_cidr_block", &vpc_cidr_block);
context.insert("vpc_name", self.options.vpc_name.as_str());
context.insert("vpc_cidr_block", self.options.vpc_cidr_block.as_str());
// Qovery
context.insert("organization_id", self.cloud_provider.organization_id());
@@ -295,16 +302,16 @@ impl<'a> Kubernetes for DOKS<'a> {
Ok(())
}
fn config_file(&self) -> Result<(String, File), EngineError> {
unimplemented!()
fn config_file(&self) -> Result<(StringPath, File), EngineError> {
self.spaces.get(
format!("qovery-kubeconfigs-{}", self.id()),
format!("{}.yaml", self.id()),
)
}
fn config_file_path(&self) -> Result<String, EngineError> {
unimplemented!()
}
fn resources(&self, _environment: &Environment) -> Result<Resources, EngineError> {
unimplemented!()
let (path, _) = self.config_file()?;
Ok(path)
}
fn on_create(&self) -> Result<(), EngineError> {

View File

@@ -120,22 +120,7 @@ impl Router {
.map(|x| x.unwrap())
.collect::<Vec<_>>();
let workspace_dir = self.workspace_directory();
let digitalocean = kubernetes
.cloud_provider()
.as_any()
.downcast_ref::<DO>()
.unwrap();
let kubernetes_config_file_path = common::kubernetes_config_path(
workspace_dir.as_str(),
kubernetes.id(),
kubernetes.region(),
digitalocean.spaces_secret_key.as_str(),
digitalocean.spaces_access_id.as_str(),
);
let do_credentials_envs = vec![(DIGITAL_OCEAN_TOKEN, digitalocean.token.as_str())];
let kubernetes_config_file_path = kubernetes.config_file_path();
match kubernetes_config_file_path {
Ok(kubernetes_config_file_path_string) => {
@@ -145,7 +130,9 @@ impl Router {
kubernetes_config_file_path_string.as_str(),
"nginx-ingress",
"app=nginx-ingress,component=controller",
do_credentials_envs.clone(),
kubernetes
.cloud_provider()
.credentials_environment_variables(),
);
match external_ingress_hostname_default {
@@ -170,7 +157,9 @@ impl Router {
kubernetes_config_file_path_string.as_str(),
environment.namespace(),
"app=nginx-ingress,component=controller",
do_credentials_envs.clone(),
kubernetes
.cloud_provider()
.credentials_environment_variables(),
);
match external_ingress_hostname_custom {
@@ -330,17 +319,7 @@ impl Create for Router {
let workspace_dir = self.workspace_directory();
let helm_release_name = self.helm_release_name();
let kubernetes_config_file_path = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
common::kubernetes_config_path(
workspace_dir.as_str(),
kubernetes.id(),
kubernetes.region(),
digitalocean.spaces_secret_key.as_str(),
digitalocean.spaces_access_id.as_str(),
),
)?;
let kubernetes_config_file_path = kubernetes.config_file_path()?;
// respect order - getting the context here and not before is mandatory
// the nginx-ingress must be available to get the external dns target if necessary

View File

@@ -9,8 +9,13 @@ use crate::cloud_provider::service::Service;
use crate::cloud_provider::{CloudProvider, DeploymentTarget};
use crate::cmd::kubectl;
use crate::dns_provider::DnsProvider;
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::models::{Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope};
use crate::error::{
cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope,
};
use crate::models::{
Context, Listen, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope, StringPath,
};
use crate::unit_conversion::{cpu_string_to_float, ki_to_mi};
pub trait Kubernetes: Listen {
fn context(&self) -> &Context;
@@ -25,9 +30,48 @@ pub trait Kubernetes: Listen {
fn cloud_provider(&self) -> &dyn CloudProvider;
fn dns_provider(&self) -> &dyn DnsProvider;
fn is_valid(&self) -> Result<(), EngineError>;
fn config_file(&self) -> Result<(String, File), EngineError>;
fn config_file(&self) -> Result<(StringPath, File), EngineError>;
fn config_file_path(&self) -> Result<String, EngineError>;
fn resources(&self, environment: &Environment) -> Result<Resources, EngineError>;
/// Default implementation shared by every `Kubernetes` provider:
/// lists the cluster nodes via `kubectl get node` (using this cluster's
/// kubeconfig) and aggregates their allocatable/capacity figures into a
/// `Resources` summary.
///
/// NOTE(review): `free_pods`/`max_pods` are assigned (`=`) on every loop
/// iteration instead of accumulated (`+=`), so they end up reflecting only
/// the last node — confirm whether this is intended.
fn resources(&self, _environment: &Environment) -> Result<Resources, EngineError> {
// path to the kubeconfig file; the provider downloads it on demand
let kubernetes_config_file_path = self.config_file_path()?;
let nodes = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context().execution_id(),
crate::cmd::kubectl::kubectl_exec_get_node(
kubernetes_config_file_path,
self.cloud_provider().credentials_environment_variables(),
),
)?;
// start from an empty tally and fold every node in
let mut resources = Resources {
free_cpu: 0.0,
max_cpu: 0.0,
free_ram_in_mib: 0,
max_ram_in_mib: 0,
free_pods: 0,
max_pods: 0,
running_nodes: 0,
};
for node in nodes.items {
// CPU values arrive as Kubernetes quantity strings; convert to floats
resources.free_cpu += cpu_string_to_float(node.status.allocatable.cpu);
resources.max_cpu += cpu_string_to_float(node.status.capacity.cpu);
// memory is reported in Ki; convert to Mi
resources.free_ram_in_mib += ki_to_mi(node.status.allocatable.memory);
resources.max_ram_in_mib += ki_to_mi(node.status.capacity.memory);
// unparsable pod counts silently fall back to 0
resources.free_pods = match node.status.allocatable.pods.parse::<u16>() {
Ok(v) => v,
_ => 0,
};
resources.max_pods = match node.status.capacity.pods.parse::<u16>() {
Ok(v) => v,
_ => 0,
};
resources.running_nodes += 1;
}
Ok(resources)
}
fn on_create(&self) -> Result<(), EngineError>;
fn on_create_error(&self) -> Result<(), EngineError>;
fn on_upgrade(&self) -> Result<(), EngineError>;

View File

@@ -202,11 +202,6 @@ impl DOCR {
Ok(PushResult { image })
}
/// Ensures a container repository exists for `image`.
///
/// TODO(review): this currently creates the repository unconditionally —
/// check whether it really exists first and skip creation when it does.
fn get_or_create_repository(&self, image: &Image) -> Result<(), EngineError> {
    // `image` is already a `&Image`; the previous code took an extra
    // reference (`&_image` -> `&&Image`) and relied on deref coercion,
    // and underscore-prefixed a parameter that is actually used.
    self.create_repository(image)
}
pub fn delete_repository(&self, _image: &Image) -> Result<(), EngineError> {
let headers = get_header_with_bearer(&self.api_key);
let res = reqwest::blocking::Client::new()

View File

@@ -962,3 +962,6 @@ impl Metadata {
}
}
}
/// Represents a `String` path, used instead of passing a `PathBuf` struct
pub type StringPath = String;

View File

@@ -1,73 +0,0 @@
use std::path::Path;
use rusoto_core::{Client, HttpClient, Region};
use rusoto_credential::StaticProvider;
use rusoto_s3::{GetObjectRequest, S3Client, S3};
use tokio::{fs::File, io};
/// Downloads a single object from a DigitalOcean Space into a local file.
///
/// No-op (apart from a log line) when `path_to_download` already exists.
/// Errors are only logged, never returned — callers cannot tell whether the
/// download succeeded; NOTE(review): consider returning a `Result` instead.
pub async fn download_space_object(
access_key_id: &str,
secret_access_key: &str,
bucket_name: &str,
object_key: &str,
region: &str,
path_to_download: &str,
) {
// skip the download entirely when the target file is already on disk
match Path::new(path_to_download.clone()).exists() {
true => info!(
"File {} already exist, nothing to do",
path_to_download.clone()
),
false => {
// DigitalOcean doesn't expose a dedicated Spaces download API;
// it is S3-compatible, so the generic AWS SDK (rusoto) is used.
let region = Region::Custom {
name: region.to_string(),
endpoint: format!("https://{}.digitaloceanspaces.com", region),
};
let credentials = StaticProvider::new(
access_key_id.to_string(),
secret_access_key.to_string(),
None,
None,
);
let client = Client::new_with(credentials, HttpClient::new().unwrap());
let s3_client = S3Client::new_with_client(client, region.clone());
let object = s3_client
.get_object(GetObjectRequest {
bucket: bucket_name.to_string(),
key: object_key.to_string(),
..Default::default()
})
.await;
match object {
Ok(mut obj_bod) => {
// stream the response body straight into the destination file
let body = obj_bod.body.take();
let mut body = body.unwrap().into_async_read();
let file = File::create(path_to_download.clone()).await;
match file {
Ok(mut created_file) => {
match io::copy(&mut body, &mut created_file).await {
Ok(_) => info!("File {} is well downloaded", path_to_download),
Err(e) => error!("{:?}", e),
}
}
Err(e) => error!(
"Unable to create file {}, error : {:?}",
path_to_download.clone(),
e
),
}
}
Err(e) => error!(
"Unable to download file {} from Space name {} Error: {:?}",
path_to_download.clone(),
bucket_name.clone(),
e
),
};
}
}
}

View File

@@ -1,19 +1,18 @@
use serde::{Deserialize, Serialize};
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
use crate::models::Context;
use crate::models::{Context, StringPath};
use std::fs::File;
pub mod do_space;
pub mod s3;
pub mod spaces;
#[derive(Serialize, Deserialize, Clone)]
pub enum Kind {
S3,
Space,
Spaces,
}
pub type FileContent = String;
pub trait ObjectStorage {
fn context(&self) -> &Context;
fn kind(&self) -> Kind;
@@ -29,8 +28,9 @@ pub trait ObjectStorage {
fn delete_bucket<S>(&self, bucket_name: S) -> Result<(), EngineError>
where
S: Into<String>;
fn get<S>(&self, object_key: S) -> Result<FileContent, EngineError>
fn get<T, S>(&self, bucket_name: T, object_key: S) -> Result<(StringPath, File), EngineError>
where
T: Into<String>,
S: Into<String>;
fn engine_error_scope(&self) -> EngineErrorScope {
EngineErrorScope::ObjectStorage(self.id().to_string(), self.name().to_string())

View File

@@ -1,13 +1,15 @@
use std::fs::read_to_string;
use std::fs::{read_to_string, File};
use std::io::Write;
use chrono::Utc;
use retry::delay::Fibonacci;
use retry::OperationResult;
use retry::{Error, OperationResult};
use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY};
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause};
use crate::models::Context;
use crate::object_storage::{FileContent, Kind, ObjectStorage};
use crate::models::{Context, StringPath};
use crate::object_storage::{Kind, ObjectStorage};
use std::path::{Path, PathBuf};
pub struct S3 {
context: Context,
@@ -40,36 +42,6 @@ impl S3 {
(AWS_SECRET_ACCESS_KEY, self.secret_access_key.as_str()),
]
}
fn get_object<S>(&self, object_key: S) -> Result<FileContent, EngineError>
where
S: Into<String>,
{
// we choose to use the AWS CLI instead of Rusoto S3 due to reliability problems we faced.
let s3_url = format!("s3://{}", object_key.into());
let local_path = format!("/tmp/{}.s3object", Utc::now().timestamp_millis()); // FIXME: change hardcoded /tmp/
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context().execution_id(),
crate::cmd::utilities::exec_with_envs(
"aws",
vec!["s3", "cp", &s3_url, &local_path],
self.credentials_environment_variables(),
),
)?;
match read_to_string(&local_path) {
Ok(file_content) => Ok(file_content),
Err(err) => {
let message = format!("{:?}", err);
error!("{}", message);
Err(self.engine_error(EngineErrorCause::Internal, message))
}
}
}
}
impl ObjectStorage for S3 {
@@ -132,41 +104,68 @@ impl ObjectStorage for S3 {
)
}
fn get<S>(&self, object_key: S) -> Result<FileContent, EngineError>
fn get<T, S>(&self, bucket_name: T, object_key: S) -> Result<(StringPath, File), EngineError>
where
T: Into<String>,
S: Into<String>,
{
let bucket_name = bucket_name.into();
let object_key = object_key.into();
let file_content_result = retry::retry(Fibonacci::from_millis(3000).take(5), || match self
.get_object(object_key.as_str())
{
Ok(file_content) => OperationResult::Ok(file_content),
Err(err) => {
debug!("{:?}", err);
warn!(
"Can't download object '{}'. Let's retry...",
object_key.as_str()
);
let workspace_directory = crate::fs::workspace_directory(
self.context().workspace_root_dir(),
self.context().execution_id(),
format!("object-storage/s3/{}", self.name()),
);
OperationResult::Retry(err)
let s3_url = format!("s3://{}/{}", bucket_name.as_str(), object_key.as_str());
let file_path = format!(
"{}/{}/{}",
workspace_directory,
bucket_name.as_str(),
object_key.as_str()
);
let result = retry::retry(Fibonacci::from_millis(3000).take(5), || {
// we choose to use the AWS CLI instead of Rusoto S3 due to reliability problems we faced.
let result = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context().execution_id(),
crate::cmd::utilities::exec_with_envs(
"aws",
vec!["s3", "cp", s3_url.as_str(), file_path.as_str()],
self.credentials_environment_variables(),
),
);
match result {
Ok(_) => OperationResult::Ok(()),
Err(err) => {
debug!("{:?}", err);
warn!(
"Can't download object '{}'. Let's retry...",
object_key.as_str()
);
OperationResult::Retry(err)
}
}
});
let file_content = match file_content_result {
Ok(file_content) => file_content,
Err(_) => {
let message = "file content is empty (retry \
failed multiple times) - which is not the \
expected content - what's wrong?"
.to_string();
error!("{}", message);
return Err(self.engine_error(EngineErrorCause::Internal, message));
let file = match result {
Ok(_) => File::open(file_path.as_str()),
Err(err) => {
return match err {
Error::Operation { error, .. } => Err(error),
Error::Internal(err) => Err(self.engine_error(EngineErrorCause::Internal, err)),
};
}
};
Ok(file_content)
match file {
Ok(file) => Ok((file_path, file)),
Err(err) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", err))),
}
}
}

View File

@@ -0,0 +1,212 @@
use std::fs::{read_to_string, File};
use std::path::Path;
use chrono::Utc;
use retry::delay::Fibonacci;
use retry::{Error, OperationResult};
use rusoto_core::{Client, HttpClient, Region};
use rusoto_credential::StaticProvider;
use rusoto_s3::{GetObjectRequest, S3Client, S3};
use tokio::io;
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause};
use crate::models::{Context, StringPath};
use crate::object_storage::{Kind, ObjectStorage};
use crate::runtime;
/// DigitalOcean Spaces object-storage backend (S3-compatible API).
pub struct Spaces {
// engine execution context (workspace root dir, execution id, ...)
context: Context,
id: String,
name: String,
// Spaces credentials, used as AWS-style access/secret keys by rusoto
access_key_id: String,
secret_access_key: String,
// DigitalOcean region hosting the Space (e.g. "fra1")
region: String,
}
impl Spaces {
/// Builds a Spaces backend for the given region and credentials.
pub fn new(
context: Context,
id: String,
name: String,
access_key_id: String,
secret_access_key: String,
region: String,
) -> Self {
Spaces {
context,
id,
name,
access_key_id,
secret_access_key,
region,
}
}
/// Downloads `object_key` from `bucket_name` into `download_into_file_path`
/// and reopens the written file for the caller.
///
/// NOTE(review): the final `File::open(...).unwrap()` can panic if the file
/// disappears between write and reopen — consider propagating an error.
async fn get_object<T, S, X>(
&self,
bucket_name: T,
object_key: S,
download_into_file_path: X,
) -> Result<File, EngineError>
where
T: Into<String>,
S: Into<String>,
X: AsRef<Path>,
{
// DigitalOcean doesn't expose a dedicated Spaces download API;
// it is S3-compatible, so the generic AWS SDK (rusoto) is used.
let region = Region::Custom {
name: self.region.clone(),
endpoint: format!("https://{}.digitaloceanspaces.com", self.region),
};
let credentials = StaticProvider::new(
self.access_key_id.clone(),
self.secret_access_key.clone(),
None,
None,
);
let client = Client::new_with(credentials, HttpClient::new().unwrap());
let s3_client = S3Client::new_with_client(client, region.clone());
let object = s3_client
.get_object(GetObjectRequest {
bucket: bucket_name.into(),
key: object_key.into(),
..Default::default()
})
.await;
match object {
Ok(mut obj_bod) => {
// stream the response body straight into the destination file
let body = obj_bod.body.take();
let mut body = body.unwrap().into_async_read();
let file = tokio::fs::File::create(download_into_file_path.as_ref()).await;
match file {
Ok(mut created_file) => match io::copy(&mut body, &mut created_file).await {
// reopen with std::fs so the caller gets a sync handle
Ok(_) => Ok(File::open(download_into_file_path.as_ref()).unwrap()),
Err(e) => {
Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e)))
}
},
Err(e) => {
Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e)))
}
}
}
Err(e) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", e))),
}
}
}
impl ObjectStorage for Spaces {
    fn context(&self) -> &Context {
        &self.context
    }

    fn kind(&self) -> Kind {
        Kind::Spaces
    }

    fn id(&self) -> &str {
        self.id.as_str()
    }

    fn name(&self) -> &str {
        self.name.as_str()
    }

    fn is_valid(&self) -> Result<(), EngineError> {
        // TODO check valid credentials
        Ok(())
    }

    // underscore-prefixed: parameter is unused until this is implemented
    // (avoids an unused-variable warning on every build)
    fn create_bucket<S>(&self, _bucket_name: S) -> Result<(), EngineError>
    where
        S: Into<String>,
    {
        unimplemented!()
    }

    fn delete_bucket<S>(&self, _bucket_name: S) -> Result<(), EngineError>
    where
        S: Into<String>,
    {
        unimplemented!()
    }

    /// Downloads `bucket_name/object_key` into the execution workspace
    /// (retrying with a Fibonacci backoff) and returns the local path
    /// together with an open handle on the downloaded file.
    fn get<T, S>(&self, bucket_name: T, object_key: S) -> Result<(StringPath, File), EngineError>
    where
        T: Into<String>,
        S: Into<String>,
    {
        let bucket_name = bucket_name.into();
        let object_key = object_key.into();

        // workspace sub-directory dedicated to this backend; labelled
        // "spaces" (was "s3", copy-pasted from the S3 implementation)
        let workspace_directory = crate::fs::workspace_directory(
            self.context().workspace_root_dir(),
            self.context().execution_id(),
            format!("object-storage/spaces/{}", self.name()),
        );

        let file_path = format!(
            "{}/{}/{}",
            workspace_directory,
            bucket_name.as_str(),
            object_key.as_str()
        );

        // retry the async download: 5 attempts, Fibonacci backoff from 3s
        let result = retry::retry(
            Fibonacci::from_millis(3000).take(5),
            || match runtime::async_run(self.get_object(
                bucket_name.as_str(),
                object_key.as_str(),
                file_path.as_str(),
            )) {
                Ok(file) => OperationResult::Ok(file),
                Err(err) => {
                    debug!("{:?}", err);
                    warn!(
                        "Can't download object '{}'. Let's retry...",
                        object_key.as_str()
                    );
                    OperationResult::Retry(err)
                }
            },
        );

        let file = match result {
            Ok(_) => File::open(file_path.as_str()),
            Err(err) => {
                return match err {
                    // propagate the last download error as-is
                    Error::Operation { error, .. } => Err(error),
                    Error::Internal(err) => Err(self.engine_error(EngineErrorCause::Internal, err)),
                };
            }
        };

        match file {
            Ok(file) => Ok((file_path, file)),
            Err(err) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", err))),
        }
    }
}
// TODO to cleanup
/// Deprecated shim kept for call sites that still download a Spaces object
/// directly; the real download now goes through `Spaces::get` (`ObjectStorage`).
/// It only logs when the target file already exists and otherwise does nothing.
/// Unused parameters are underscore-prefixed (they silence compiler warnings
/// while keeping the signature source-compatible with existing callers).
pub async fn download_space_object(
    _access_key_id: &str,
    _secret_access_key: &str,
    _bucket_name: &str,
    _object_key: &str,
    _region: &str,
    path_to_download: &str,
) {
    // `path_to_download` is a `&str`: cloning the reference (as the previous
    // code did) is a no-op, and the empty `false` arm made `match` pointless.
    if Path::new(path_to_download).exists() {
        info!("File {} already exist, nothing to do", path_to_download);
    }
}

View File

@@ -18,6 +18,7 @@ curl = "0.4.34"
reqwest = { version = "0.10.8", features = ["blocking"] }
tracing = "0.1"
tracing-subscriber = "0.2"
retry = "1.0.0"
# Digital Ocean Deps
digitalocean = "0.1.1"
digitalocean = "0.1.1"

View File

@@ -4,7 +4,6 @@ extern crate serde_derive;
use std::fs::File;
use chrono::Utc;
use dirs::home_dir;
use qovery_engine::cloud_provider::aws::kubernetes::node::Node;
use qovery_engine::cloud_provider::aws::kubernetes::EKS;

View File

@@ -1,15 +1,24 @@
use std::fs::read_to_string;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use chrono::Utc;
use curl::easy::Easy;
use dirs::home_dir;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use retry::delay::Fibonacci;
use retry::OperationResult;
use std::os::unix::fs::PermissionsExt;
use tracing::Level;
use tracing_subscriber;
use tracing_subscriber::util::SubscriberInitExt;
use qovery_engine::build_platform::local_docker::LocalDocker;
use qovery_engine::cmd;
use qovery_engine::error::SimpleError;
use qovery_engine::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY};
use qovery_engine::error::{SimpleError, SimpleErrorKind};
use qovery_engine::models::{Context, Environment, Metadata};
use crate::aws::{aws_access_key_id, aws_secret_access_key, KUBE_CLUSTER_ID};
@@ -145,13 +154,7 @@ where
match file_content {
Ok(file_content) => OperationResult::Ok(file_content),
Err(err) => {
warn!(
"Can't download the kubernetes config file {} stored on {}, please check access key and secrets",
kubernetes_config_object_key, kubernetes_config_bucket_name
);
OperationResult::Retry(err)
}
Err(err) => OperationResult::Retry(err),
}
});
@@ -166,12 +169,12 @@ where
};
let mut kubernetes_config_file = File::create(file_path.as_ref())?;
let _ = kubernetes_config_file.write(file_content.as_bytes())?;
let _ = kubernetes_config_file.write_all(file_content.as_bytes())?;
// removes warning kubeconfig is (world/group) readable
let metadata = kubernetes_config_file.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o400);
fs::set_permissions(file_path.as_ref(), permissions)?;
std::fs::set_permissions(file_path.as_ref(), permissions)?;
Ok(kubernetes_config_file)
}
@@ -182,16 +185,16 @@ fn get_object_via_aws_cli(
secret_access_key: &str,
bucket_name: &str,
object_key: &str,
) -> Result<FileContent, SimpleError> {
) -> Result<String, SimpleError> {
let s3_url = format!("s3://{}/{}", bucket_name, object_key);
let local_path = format!("/tmp/{}", object_key); // FIXME: change hardcoded /tmp/
exec_with_envs(
qovery_engine::cmd::utilities::exec_with_envs(
"aws",
vec!["s3", "cp", &s3_url, &local_path],
vec![
(AWS_ACCESS_KEY_ID, &access_key_id),
(AWS_SECRET_ACCESS_KEY, &secret_access_key),
(AWS_ACCESS_KEY_ID, access_key_id),
(AWS_SECRET_ACCESS_KEY, secret_access_key),
],
)?;

View File

@@ -6,9 +6,7 @@ use std::io::Read;
use test_utilities::digitalocean::DO_KUBERNETES_VERSION;
use tracing::{error, span, Level};
use qovery_engine::cloud_provider::digitalocean::common::{
get_uuid_of_cluster_from_name, kubernetes_config_path,
};
use qovery_engine::cloud_provider::digitalocean::common::get_uuid_of_cluster_from_name;
use qovery_engine::cloud_provider::digitalocean::kubernetes::DOKS;
use qovery_engine::cloud_provider::digitalocean::models::cluster::Clusters;
use qovery_engine::cmd::kubectl::{kubectl_exec_create_namespace, kubectl_exec_delete_namespace};
@@ -20,6 +18,7 @@ use self::test_utilities::digitalocean::{
get_kube_cluster_name_from_uuid,
};
use self::test_utilities::utilities::{generate_id, init};
use qovery_engine::cloud_provider::kubernetes::Kubernetes;
#[test]
fn create_doks_cluster_in_fra_10() {
@@ -84,13 +83,7 @@ fn create_doks_cluster_in_fra_10() {
//TESTING: Kubeconfig DOWNLOAD
//TODO: Fix the kubernetes_config_path fn
match kubernetes_config_path(
context.lib_root_dir().clone(),
cluster_id.clone(),
region.clone(),
digital_ocean_spaces_secret_key().as_str(),
digital_ocean_spaces_access_id().as_str(),
) {
match kubernetes.config_file_path() {
Ok(file) => {
let do_credentials_envs = vec![(DIGITAL_OCEAN_TOKEN, digitalo.token.as_str())];
// testing kubeconfig file

View File

@@ -1 +1 @@
mod s3;