diff --git a/Cargo.lock b/Cargo.lock index c1eaf7f1..7cae05a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -449,9 +449,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ "generic-array 0.14.4", "subtle", @@ -1087,9 +1087,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", "digest 0.9.0", @@ -1202,7 +1202,7 @@ dependencies = [ "itoa", "log", "net2", - "rustc_version", + "rustc_version 0.2.3", "time 0.1.44", "tokio 0.1.22", "tokio-buf", @@ -1559,10 +1559,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] -name = "md5" -version = "0.7.0" +name = "md-5" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" +checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "opaque-debug 0.3.0", +] [[package]] name = "memchr" @@ -1825,7 +1830,7 @@ checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ "lock_api 0.3.4", "parking_lot_core 0.6.2", - "rustc_version", + "rustc_version 0.2.3", ] [[package]] @@ -1849,7 +1854,7 @@ dependencies = [ "cloudabi", "libc", "redox_syscall 0.1.57", - "rustc_version", + "rustc_version 
0.2.3", "smallvec 0.6.14", "winapi 0.3.9", ] @@ -2603,9 +2608,9 @@ dependencies = [ [[package]] name = "rusoto_core" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02aff20978970d47630f08de5f0d04799497818d16cafee5aec90c4b4d0806cf" +checksum = "5b4f000e8934c1b4f70adde180056812e7ea6b1a247952db8ee98c94cd3116cc" dependencies = [ "async-trait", "base64 0.13.0", @@ -2619,7 +2624,7 @@ dependencies = [ "log", "rusoto_credential", "rusoto_signature", - "rustc_version", + "rustc_version 0.4.0", "serde", "serde_json", "tokio 1.10.0", @@ -2628,9 +2633,9 @@ dependencies = [ [[package]] name = "rusoto_credential" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e91e4c25ea8bfa6247684ff635299015845113baaa93ba8169b9e565701b58e" +checksum = "6a46b67db7bb66f5541e44db22b0a02fed59c9603e146db3a9e633272d3bac2f" dependencies = [ "async-trait", "chrono", @@ -2646,9 +2651,9 @@ dependencies = [ [[package]] name = "rusoto_dynamodb" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f26af40f36409cb8fae3069690f78f638f747b55c7b90f338d5ed36016b0cda" +checksum = "7935e1f9ca57c4ee92a4d823dcd698eb8c992f7e84ca21976ae72cd2b03016e7" dependencies = [ "async-trait", "bytes 1.0.1", @@ -2660,9 +2665,9 @@ dependencies = [ [[package]] name = "rusoto_ecr" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab222491e156f033926d40c663d57a6b60a5c5ec94e696e66f52a0c64d20dbf" +checksum = "93ec4a28e4fb276307c2129abb16cf7d5573da4ec24d9e3895cc5c8a8bc21a4d" dependencies = [ "async-trait", "bytes 1.0.1", @@ -2674,9 +2679,9 @@ dependencies = [ [[package]] name = "rusoto_eks" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"91d7e1e577d4102a9d80d5eafc0547064d3e8817d094f00e95ae45d03ae3accb" +checksum = "7bada849ce4a4836ae23920613144339b23dc0ebfc4d19fbc20f6b7b9d3cb6d9" dependencies = [ "async-trait", "bytes 1.0.1", @@ -2689,23 +2694,23 @@ dependencies = [ [[package]] name = "rusoto_iam" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0268b898abed79c59f8468c4991d0f97ed0925049db228cff623ecac44c5b3a6" +checksum = "eeb3a551b0fea2882b7caafc7dcdc74c80e73f16c41bfb722f4ea88e3e21625d" dependencies = [ "async-trait", "bytes 1.0.1", "futures 0.3.16", "rusoto_core", - "serde_urlencoded 0.6.1", + "serde_urlencoded 0.7.0", "xml-rs", ] [[package]] name = "rusoto_s3" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abc3f56f14ccf91f880b9a9c2d0556d8523e8c155041c54db155b384a1dd1119" +checksum = "048c2fe811a823ad5a9acc976e8bf4f1d910df719dcf44b15c3e96c5b7a51027" dependencies = [ "async-trait", "bytes 1.0.1", @@ -2716,41 +2721,42 @@ dependencies = [ [[package]] name = "rusoto_signature" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486e6b1673ab3e0ba1ded284fb444845fe1b7f41d13989a54dd60f62a7b2baa" +checksum = "6264e93384b90a747758bcc82079711eacf2e755c3a8b5091687b5349d870bcc" dependencies = [ "base64 0.13.0", "bytes 1.0.1", + "chrono", + "digest 0.9.0", "futures 0.3.16", "hex", "hmac", "http 0.2.4", "hyper 0.14.11", "log", - "md5", + "md-5", "percent-encoding 2.1.0", "pin-project-lite 0.2.7", "rusoto_credential", - "rustc_version", + "rustc_version 0.4.0", "serde", "sha2", - "time 0.2.27", "tokio 1.10.0", ] [[package]] name = "rusoto_sts" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f93005e0c3b9e40a424b50ca71886d2445cc19bb6cdac3ac84c2daff482eb59" +checksum = "4e7edd42473ac006fd54105f619e480b0a94136e7f53cf3fb73541363678fd92" 
dependencies = [ "async-trait", "bytes 1.0.1", "chrono", "futures 0.3.16", "rusoto_core", - "serde_urlencoded 0.6.1", + "serde_urlencoded 0.7.0", "xml-rs", ] @@ -2788,6 +2794,15 @@ dependencies = [ "semver 0.9.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.4", +] + [[package]] name = "rustversion" version = "1.0.6" @@ -2925,18 +2940,6 @@ dependencies = [ "url 1.7.2", ] -[[package]] -name = "serde_urlencoded" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" -dependencies = [ - "dtoa", - "itoa", - "serde", - "url 2.2.2", -] - [[package]] name = "serde_urlencoded" version = "0.7.0" @@ -2991,9 +2994,9 @@ dependencies = [ [[package]] name = "shlex" -version = "0.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" @@ -3071,7 +3074,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" dependencies = [ "discard", - "rustc_version", + "rustc_version 0.2.3", "stdweb-derive", "stdweb-internal-macros", "stdweb-internal-runtime", diff --git a/Cargo.toml b/Cargo.toml index 21d37564..b4bf09ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,14 +55,14 @@ serde_json = "1.0.64" serde_derive = "1.0.126" # AWS deps tokio = { version = "1.10.0", features = ["full"] } -rusoto_core = "0.46.0" -rusoto_sts = "0.46.0" -rusoto_credential = "0.46.0" -rusoto_ecr = "0.46.0" -rusoto_eks = "0.46.0" -rusoto_s3 = "0.46.0" -rusoto_dynamodb = "0.46.0" -rusoto_iam = "0.46.0" 
+rusoto_core = "0.47.0" +rusoto_sts = "0.47.0" +rusoto_credential = "0.47.0" +rusoto_ecr = "0.47.0" +rusoto_eks = "0.47.0" +rusoto_s3 = "0.47.0" +rusoto_dynamodb = "0.47.0" +rusoto_iam = "0.47.0" # Digital Ocean Deps digitalocean = "0.1.1" diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs index 3b5b5327..9ece170b 100644 --- a/src/cloud_provider/aws/kubernetes/mod.rs +++ b/src/cloud_provider/aws/kubernetes/mod.rs @@ -191,6 +191,9 @@ impl<'a> EKS<'a> { "default-s3".to_string(), cloud_provider.access_key_id().clone(), cloud_provider.secret_access_key().clone(), + region.clone(), + true, + context.resource_expiration_in_seconds(), ); Ok(EKS { diff --git a/src/constants.rs b/src/constants.rs index c3df0240..88c8299b 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,6 +1,7 @@ pub const TF_PLUGIN_CACHE_DIR: &str = "TF_PLUGIN_CACHE_DIR"; pub const AWS_ACCESS_KEY_ID: &str = "AWS_ACCESS_KEY_ID"; pub const AWS_SECRET_ACCESS_KEY: &str = "AWS_SECRET_ACCESS_KEY"; +pub const AWS_DEFAULT_REGION: &str = "AWS_DEFAULT_REGION"; pub const KUBECONFIG: &str = "KUBECONFIG"; pub const DIGITAL_OCEAN_TOKEN: &str = "DIGITAL_OCEAN_TOKEN"; pub const DIGITAL_OCEAN_SPACES_ACCESS_ID: &str = "DIGITAL_OCEAN_SPACES_ACCESS_ID"; diff --git a/src/object_storage/s3.rs b/src/object_storage/s3.rs index 86552215..68dbf09e 100644 --- a/src/object_storage/s3.rs +++ b/src/object_storage/s3.rs @@ -1,14 +1,23 @@ +use chrono::{DateTime, Utc}; +use retry::delay::Fixed; use std::fs::File; +use std::path::Path; +use std::str::FromStr; -use crate::cmd::utilities::QoveryCommand; -use retry::delay::Fibonacci; -use retry::{Error, OperationResult}; +use crate::cloud_provider::aws::regions::AwsRegion; +use rusoto_core::credential::StaticProvider; +use rusoto_core::{Client, HttpClient, Region as RusotoRegion}; +use rusoto_s3::{ + CreateBucketConfiguration, CreateBucketRequest, Delete, DeleteBucketRequest, DeleteObjectsRequest, + GetObjectRequest, HeadBucketRequest, 
ListObjectsRequest, ObjectIdentifier, PutBucketTaggingRequest, + PutBucketVersioningRequest, PutObjectRequest, S3Client, StreamingBody, Tag, Tagging, S3 as RusotoS3, +}; +use tokio::io; -use crate::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY}; -use crate::error::SimpleErrorKind::Other; -use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause, SimpleError}; +use crate::error::{EngineError, EngineErrorCause}; use crate::models::{Context, StringPath}; use crate::object_storage::{Kind, ObjectStorage}; +use crate::runtime::block_on; pub struct S3 { context: Context, @@ -16,24 +25,124 @@ pub struct S3 { name: String, access_key_id: String, secret_access_key: String, + region: AwsRegion, + bucket_versioning_activated: bool, + bucket_ttl_in_seconds: Option<u32>, + } impl S3 { - pub fn new(context: Context, id: String, name: String, access_key_id: String, secret_access_key: String) -> Self { + pub fn new( + context: Context, + id: String, + name: String, + access_key_id: String, + secret_access_key: String, + region: AwsRegion, + bucket_versioning_activated: bool, + bucket_ttl_in_seconds: Option<u32>, + ) -> Self { S3 { context, id, name, - access_key_id, - secret_access_key, + access_key_id: access_key_id.to_string(), + secret_access_key: secret_access_key.to_string(), + region: region.clone(), + bucket_versioning_activated, + bucket_ttl_in_seconds, } } - fn credentials_environment_variables(&self) -> Vec<(&str, &str)> { - vec![ - (AWS_ACCESS_KEY_ID, self.access_key_id.as_str()), - (AWS_SECRET_ACCESS_KEY, self.secret_access_key.as_str()), - ] + fn get_credentials(&self) -> StaticProvider { + StaticProvider::new(self.access_key_id.clone(), self.secret_access_key.clone(), None, None) + } + + fn get_s3_client(&self) -> S3Client { + let region = RusotoRegion::from_str(&self.region.to_aws_format()) + .expect(format!("S3 region `{}` doesn't seems to be valid.", self.region.to_aws_format()).as_str()); + let client = Client::new_with(
self.get_credentials(), + HttpClient::new().expect("unable to create new Http client"), + ); + + S3Client::new_with_client(client, region) + } + + fn is_bucket_name_valid(bucket_name: &str) -> Result<(), Option<String>> { + if bucket_name.is_empty() { + return Err(Some("bucket name cannot be empty".to_string())); + } + + Ok(()) + } + + pub fn bucket_exists(&self, bucket_name: &str) -> bool { + let s3_client = self.get_s3_client(); + + // Note: Using rusoto here for convenience, should be possible via CLI but would be way less convenient. + // Using retry here since there is a lag after bucket creation + retry::retry(Fixed::from_millis(1000).take(10), || { + block_on(s3_client.head_bucket(HeadBucketRequest { + bucket: bucket_name.to_string(), + expected_bucket_owner: None, + })) + }) + .is_ok() + } + + fn empty_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { + if let Err(message) = S3::is_bucket_name_valid(bucket_name) { + let message = format!( + "While trying to empty S3 bucket, name `{}` is invalid: {}", + bucket_name, + message.unwrap_or_else(|| "unknown error".to_string()) + ); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + let s3_client = self.get_s3_client(); + + // make sure to delete all bucket content before trying to delete the bucket + let objects_to_be_deleted = match block_on(s3_client.list_objects(ListObjectsRequest { + bucket: bucket_name.to_string(), + ..Default::default() + })) { + Ok(res) => res.contents.unwrap_or_default(), + Err(_) => { + vec![] + } + }; + + if !objects_to_be_deleted.is_empty() { + if let Err(e) = block_on( + s3_client.delete_objects(DeleteObjectsRequest { + bucket: bucket_name.to_string(), + delete: Delete { + objects: objects_to_be_deleted + .iter() + .filter_map(|e| e.key.clone()) + .map(|e| ObjectIdentifier { + key: e, + version_id: None, + }) + .collect(), + ..Default::default() + }, + ..Default::default() + }), + ) { + let message = format!( + "While trying to empty S3 bucket `{}`
region `{}`, cannot delete content: {}", + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + } + + Ok(()) } } @@ -60,40 +169,131 @@ impl ObjectStorage for S3 { } fn create_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - let mut cmd = QoveryCommand::new( - "aws", - &vec!["s3api", "create-bucket", "--bucket", bucket_name], - &self.credentials_environment_variables(), - ); - cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - cmd.exec() - .map_err(|err| SimpleError::new(Other, Some(format!("{}", err)))), - ) + if let Err(message) = S3::is_bucket_name_valid(bucket_name) { + let message = format!( + "While trying to create S3 bucket, name `{}` is invalid: {}", + bucket_name, + message.unwrap_or_else(|| "unknown error".to_string()) + ); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + let s3_client = self.get_s3_client(); + + // check if bucket already exists, if so, no need to recreate it + if self.bucket_exists(bucket_name) { + return Ok(()); + } + + if let Err(e) = block_on(s3_client.create_bucket(CreateBucketRequest { + bucket: bucket_name.to_string(), + create_bucket_configuration: Some(CreateBucketConfiguration { + location_constraint: Some(self.region.to_aws_format()), + }), + ..Default::default() + })) { + let message = format!( + "While trying to create S3 bucket, name `{}` region `{}`: {}", + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + let creation_date: DateTime<Utc> = Utc::now(); + if let Err(e) = block_on(s3_client.put_bucket_tagging(PutBucketTaggingRequest { + bucket: bucket_name.to_string(), + expected_bucket_owner: None, + tagging: Tagging { + tag_set: vec![ + Tag { + key: "CreationDate".to_string(), + value: format!("{}", creation_date.to_rfc3339()), + }, +
Tag { + key: "Ttl".to_string(), + value: format!("{}", self.bucket_ttl_in_seconds.unwrap_or_else(|| 0).to_string()), + }, + ], + }, + ..Default::default() + })) { + let message = format!( + "While trying to add tags on S3 bucket, name `{}` region `{}`: {}", + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + if self.bucket_versioning_activated { + if let Err(e) = block_on(s3_client.put_bucket_versioning(PutBucketVersioningRequest { + bucket: bucket_name.to_string(), + ..Default::default() + })) { + let message = format!( + "While trying to activate versioning on S3 bucket, name `{}` region `{}`: {}", + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + } + } + + Ok(()) } fn delete_bucket(&self, bucket_name: &str) -> Result<(), EngineError> { - let mut cmd = QoveryCommand::new( - "aws", - &vec![ - "s3", - "rb", - "--force", - "--bucket", - format!("s3://{}", bucket_name).as_str(), - ], - &self.credentials_environment_variables(), - ); - cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - cmd.exec() - .map_err(|err| SimpleError::new(Other, Some(format!("{}", err)))), - ) + if let Err(message) = S3::is_bucket_name_valid(bucket_name) { + let message = format!( + "While trying to delete S3 bucket, name `{}` is invalid: {}", + bucket_name, + message.unwrap_or_else(|| "unknown error".to_string()) + ); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + let s3_client = self.get_s3_client(); + + // make sure to delete all bucket content before trying to delete the bucket + if let Err(e) = self.empty_bucket(bucket_name) { + return Err(e); + } + + match block_on(s3_client.delete_bucket(DeleteBucketRequest { + bucket: bucket_name.to_string(), + expected_bucket_owner: None, + })) { + Ok(_) => Ok(()), + Err(e) => { + let message = format!( + "While trying to delete S3 
bucket, name `{}` region `{}`: {}", + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + Err(self.engine_error(EngineErrorCause::Internal, message)) + } + } } fn get(&self, bucket_name: &str, object_key: &str, use_cache: bool) -> Result<(StringPath, File), EngineError> { + if let Err(message) = S3::is_bucket_name_valid(bucket_name) { + let message = format!( + "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", + object_key, + bucket_name, + message.unwrap_or_else(|| "unknown error".to_string()) + ); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + let workspace_directory = crate::fs::workspace_directory( self.context().workspace_root_dir(), self.context().execution_id(), @@ -101,7 +301,6 @@ impl ObjectStorage for S3 { ) .map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?; - let s3_url = format!("s3://{}/{}", bucket_name, object_key); let file_path = format!("{}/{}/{}", workspace_directory, bucket_name, object_key); if use_cache { @@ -115,65 +314,142 @@ impl ObjectStorage for S3 { } } - // retrieve config file from object storage - let mut cmd = QoveryCommand::new( - "aws", - &vec!["s3", "cp", s3_url.as_str(), file_path.as_str()], - &self.credentials_environment_variables(), - ); - let result = retry::retry(Fibonacci::from_millis(3000).take(5), || { - // we choose to use the AWS CLI instead of Rusoto S3 due to reliability problems we faced. 
- let result = cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - cmd.exec() - .map_err(|err| SimpleError::new(Other, Some(format!("{}", err)))), - ); + let s3_client = self.get_s3_client(); - match result { - Ok(_) => OperationResult::Ok(()), - Err(err) => { - debug!("{:?}", err); + match block_on(s3_client.get_object(GetObjectRequest { + bucket: bucket_name.to_string(), + key: object_key.to_string(), + expected_bucket_owner: None, + ..Default::default() + })) { + Ok(mut res) => { + let body = res.body.take(); + let mut body = body.unwrap().into_async_read(); - warn!("Can't download object '{}'. Let's retry...", object_key); + // create parent dir + let path = Path::new(file_path.as_str()); + let parent_dir = path.parent().unwrap(); + let _ = block_on(tokio::fs::create_dir_all(parent_dir)); - OperationResult::Retry(err) + // create file + match block_on( + tokio::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path), + ) { + Ok(mut created_file) => match block_on(io::copy(&mut body, &mut created_file)) { + Ok(_) => { + let file = File::open(path).unwrap(); + Ok((file_path, file)) + } + Err(e) => { + let message = format!("{}", e); + error!("{}", message); + Err(self.engine_error(EngineErrorCause::Internal, message)) + } + }, + Err(e) => { + let message = format!("{}", e); + error!("{}", message); + Err(self.engine_error(EngineErrorCause::Internal, message)) + } } } - }); - - let file = match result { - Ok(_) => File::open(file_path.as_str()), - Err(err) => { - return match err { - Error::Operation { error, .. 
} => Err(error), - Error::Internal(err) => Err(self.engine_error(EngineErrorCause::Internal, err)), - }; + Err(e) => { + let message = format!( + "While trying to get object `{}` from bucket `{}` region `{}`, error: {}", + object_key, + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + Err(self.engine_error(EngineErrorCause::Internal, message)) } - }; - - match file { - Ok(file) => Ok((file_path, file)), - Err(err) => Err(self.engine_error(EngineErrorCause::Internal, format!("{:?}", err))), } } fn put(&self, bucket_name: &str, object_key: &str, file_path: &str) -> Result<(), EngineError> { - let mut cmd = QoveryCommand::new( - "aws", - &vec![ - "s3", - "cp", - file_path, - format!("s3://{}/{}", bucket_name, object_key).as_str(), - ], - &self.credentials_environment_variables(), - ); - cast_simple_error_to_engine_error( - self.engine_error_scope(), - self.context().execution_id(), - cmd.exec() - .map_err(|err| SimpleError::new(Other, Some(format!("{}", err)))), - ) + if let Err(message) = S3::is_bucket_name_valid(bucket_name) { + let message = format!( + "While trying to get object `{}` from bucket `{}`, bucket name is invalid: {}", + object_key, + bucket_name, + message.unwrap_or_else(|| "unknown error".to_string()) + ); + return Err(self.engine_error(EngineErrorCause::Internal, message)); + } + + let s3_client = self.get_s3_client(); + + match block_on(s3_client.put_object(PutObjectRequest { + bucket: bucket_name.to_string(), + key: object_key.to_string(), + body: Some(StreamingBody::from(match std::fs::read(file_path.clone()) { + Ok(x) => x, + Err(e) => { + return Err(self.engine_error( + EngineErrorCause::Internal, + format!( + "error while uploading object {} to bucket {}. 
{}", + object_key, bucket_name, e + ), + )) + } + })), + expected_bucket_owner: None, + ..Default::default() + })) { + Ok(_) => Ok(()), + Err(e) => { + let message = format!( + "While trying to put object `{}` from bucket `{}` region `{}`, error: {}", + object_key, + bucket_name, + self.region.to_aws_format(), + e + ); + error!("{}", message); + Err(self.engine_error(EngineErrorCause::Internal, message)) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + struct TestCase<'a> { + bucket_name_input: &'a str, + expected_output: Result<(), Option<String>>, + description: &'a str, + } + + #[test] + fn test_is_bucket_name_valid() { + // setup: + let test_cases: Vec<TestCase> = vec![ + TestCase { + bucket_name_input: "", + expected_output: Err(Some(String::from("bucket name cannot be empty"))), + description: "bucket name is empty", + }, + TestCase { + bucket_name_input: "valid", + expected_output: Ok(()), + description: "bucket name is valid", + }, + ]; + + for tc in test_cases { + // execute: + let result = S3::is_bucket_name_valid(tc.bucket_name_input); + + // verify: + assert_eq!(tc.expected_output, result, "{}", tc.description); + } } } diff --git a/test_utilities/Cargo.lock b/test_utilities/Cargo.lock index 073ef1bc..555b69cb 100644 --- a/test_utilities/Cargo.lock +++ b/test_utilities/Cargo.lock @@ -446,9 +446,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ "generic-array 0.14.4", "subtle", ] diff --git a/test_utilities/src/aws.rs b/test_utilities/src/aws.rs index 9f6e274c..f4f71593 100644 --- a/test_utilities/src/aws.rs +++ b/test_utilities/src/aws.rs @@ -20,14 +20,15 @@ use crate::cloudflare::dns_provider_cloudflare; use crate::common::{Cluster, ClusterDomain}; use
crate::utilities::{build_platform_local_docker, FuncTestsSecrets}; -pub const AWS_REGION_FOR_S3: &str = "eu-west-3"; -pub const AWS_TEST_REGION: &str = "eu-west-3"; +pub const AWS_REGION_FOR_S3: AwsRegion = AwsRegion::EuWest3; +pub const AWS_TEST_REGION: AwsRegion = AwsRegion::EuWest3; pub const AWS_KUBERNETES_MAJOR_VERSION: u8 = 1; pub const AWS_KUBERNETES_MINOR_VERSION: u8 = 19; pub const AWS_KUBERNETES_VERSION: &'static str = formatcp!("{}.{}", AWS_KUBERNETES_MAJOR_VERSION, AWS_KUBERNETES_MINOR_VERSION); pub const AWS_DATABASE_INSTANCE_TYPE: &str = "db.t3.micro"; pub const AWS_DATABASE_DISK_TYPE: &str = "gp2"; +pub const AWS_RESOURCE_TTL_IN_SECONDS: u32 = 7200; pub fn container_registry_ecr(context: &Context) -> ECR { let secrets = FuncTestsSecrets::new(); diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index fe8dfe0b..fcbaaaf3 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -44,6 +44,11 @@ use std::path::Path; use std::str::FromStr; use tracing::{span, Level}; +pub enum RegionActivationStatus { + Deactivated, + Activated, +} + pub enum ClusterDomain { Default, Custom(String), diff --git a/tests/aws/aws_s3.rs b/tests/aws/aws_s3.rs new file mode 100644 index 00000000..bc25292a --- /dev/null +++ b/tests/aws/aws_s3.rs @@ -0,0 +1,253 @@ +use qovery_engine::cloud_provider::aws::regions::AwsRegion; +use qovery_engine::object_storage::s3::S3; +use qovery_engine::object_storage::ObjectStorage; +use std::str::FromStr; +use tempfile::NamedTempFile; +use test_utilities::utilities::{context, generate_id, FuncTestsSecrets}; + +#[cfg(feature = "test-aws-infra")] +#[test] +fn test_delete_bucket() { + // setup: + let context = context("fake_orga_id", "fake_cluster_id"); + let secrets = FuncTestsSecrets::new(); + let id = generate_id(); + let name = format!("test-{}", id.to_string()); + let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); + let aws_secret_key = 
secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); + let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); + let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) + .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + + let aws_os = S3::new( + context.clone(), + id.to_string(), + name.to_string(), + aws_access_key, + aws_secret_key, + aws_region.clone(), + false, + context.resource_expiration_in_seconds(), + ); + + let bucket_name = format!("qovery-test-bucket-{}", generate_id()); + + aws_os + .create_bucket(bucket_name.as_str()) + .expect(format!("error while creating S3 bucket in `{}`", aws_region.to_aws_format()).as_str()); + + // compute: + let result = aws_os.delete_bucket(bucket_name.as_str()); + + // validate: + assert!( + result.is_ok(), + "Delete bucket failed in `{}`", + aws_region.to_aws_format() + ); + assert!( + !aws_os.bucket_exists(bucket_name.as_str()), + "Delete bucket failed in `{}`, bucket still exists", + aws_region.to_aws_format() + ); +} + +#[cfg(feature = "test-aws-infra")] +#[test] +fn test_create_bucket() { + // setup: + let context = context("fake_orga_id", "fake_cluster_id"); + let secrets = FuncTestsSecrets::new(); + let id = generate_id(); + let name = format!("test-{}", id.to_string()); + let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); + let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); + let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); + let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) + .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + + let aws_os = S3::new( + context.clone(), + id.to_string(), + name.to_string(), + aws_access_key, + aws_secret_key, + aws_region.clone(), + false, + context.resource_expiration_in_seconds(), + ); + + let bucket_name = 
format!("qovery-test-bucket-{}", generate_id()); + + // compute: + let result = aws_os.create_bucket(bucket_name.as_str()); + + // validate: + assert!( + result.is_ok(), + "Create bucket failed in `{}`", + aws_region.to_aws_format() + ); + assert!( + aws_os.bucket_exists(bucket_name.as_str()), + "Create bucket failed in `{}`, bucket doesn't exist", + aws_region.to_aws_format() + ); + + // clean-up: + aws_os.delete_bucket(bucket_name.as_str()).unwrap_or_else(|_| { + panic!( + "error deleting S3 bucket `{}` in `{}`", + bucket_name, + aws_region.to_aws_format() + ) + }); +} + +#[cfg(feature = "test-aws-infra")] +#[test] +fn test_recreate_bucket() { + // setup: + let context = context("fake_orga_id", "fake_cluster_id"); + let secrets = FuncTestsSecrets::new(); + let id = generate_id(); + let name = format!("test-{}", id.to_string()); + let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); + let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); + let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); + let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) + .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + + let aws_os = S3::new( + context.clone(), + id.to_string(), + name.to_string(), + aws_access_key, + aws_secret_key, + aws_region.clone(), + false, + context.resource_expiration_in_seconds(), + ); + + let bucket_name = format!("qovery-test-bucket-{}", generate_id()); + + // compute & validate: + let create_result = aws_os.create_bucket(bucket_name.as_str()); + assert!(create_result.is_ok()); + assert!(aws_os.bucket_exists(bucket_name.as_str())); + + let delete_result = aws_os.delete_bucket(bucket_name.as_str()); + assert!(delete_result.is_ok()); + + let recreate_result = aws_os.create_bucket(bucket_name.as_str()); + assert!(recreate_result.is_ok()); + assert!(aws_os.bucket_exists(bucket_name.as_str())); + + // 
clean-up: + aws_os + .delete_bucket(bucket_name.as_str()) + .unwrap_or_else(|_| panic!("error deleting S3 bucket {}", bucket_name)); +} + +#[cfg(feature = "test-aws-infra")] +#[test] +fn test_put_file() { + // setup: + let context = context("fake_orga_id", "fake_cluster_id"); + let secrets = FuncTestsSecrets::new(); + let id = generate_id(); + let name = format!("test-{}", id.to_string()); + let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); + let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); + let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); + let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) + .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + + let aws_os = S3::new( + context.clone(), + id.to_string(), + name.to_string(), + aws_access_key, + aws_secret_key, + aws_region.clone(), + false, + context.resource_expiration_in_seconds(), + ); + + let bucket_name = format!("qovery-test-bucket-{}", generate_id()); + let object_key = format!("test-object-{}", generate_id()); + + aws_os + .create_bucket(bucket_name.as_str()) + .expect("error while creating object-storage bucket"); + + let temp_file = NamedTempFile::new().expect("error while creating tempfile"); + + // compute: + let result = aws_os.put( + bucket_name.as_str(), + object_key.as_str(), + temp_file.into_temp_path().to_str().unwrap(), + ); + + // validate: + assert!(result.is_ok()); + assert!(aws_os.get(bucket_name.as_str(), object_key.as_str(), false).is_ok()); + + // clean-up: + aws_os + .delete_bucket(bucket_name.as_str()) + .unwrap_or_else(|_| panic!("error deleting S3 bucket {}", bucket_name)); +} + +#[cfg(feature = "test-aws-infra")] +#[test] +fn test_get_file() { + // setup: + let context = context("fake_orga_id", "fake_cluster_id"); + let secrets = FuncTestsSecrets::new(); + let id = generate_id(); + let name = format!("test-{}", 
id.to_string()); + let aws_access_key = secrets.AWS_ACCESS_KEY_ID.expect("AWS_ACCESS_KEY_ID is not set"); + let aws_secret_key = secrets.AWS_SECRET_ACCESS_KEY.expect("AWS_SECRET_ACCESS_KEY is not set"); + let aws_region_raw = secrets.AWS_DEFAULT_REGION.expect("AWS_DEFAULT_REGION is not set"); + let aws_region = AwsRegion::from_str(aws_region_raw.as_str()) + .expect(format!("AWS region `{}` seems not to be valid", aws_region_raw).as_str()); + + let aws_os = S3::new( + context.clone(), + id.to_string(), + name.to_string(), + aws_access_key, + aws_secret_key, + aws_region.clone(), + false, + context.resource_expiration_in_seconds(), + ); + + let bucket_name = format!("qovery-test-bucket-{}", generate_id()); + let object_key = format!("test-object-{}", generate_id()); + + aws_os + .create_bucket(bucket_name.as_str()) + .expect("error while creating object-storage bucket"); + + let temp_file = NamedTempFile::new().expect("error while creating tempfile"); + let tempfile_path = temp_file.into_temp_path(); + let tempfile_path = tempfile_path.to_str().unwrap(); + + aws_os + .put(bucket_name.as_str(), object_key.as_str(), tempfile_path) + .unwrap_or_else(|_| panic!("error while putting file {} into bucket {}", tempfile_path, bucket_name)); + + // compute: + let result = aws_os.get(bucket_name.as_str(), object_key.as_str(), false); + + // validate: + assert!(result.is_ok()); + + // clean-up: + aws_os + .delete_bucket(bucket_name.as_str()) + .unwrap_or_else(|_| panic!("error deleting S3 bucket {}", bucket_name)); +} diff --git a/tests/aws/mod.rs b/tests/aws/mod.rs index c4e497ac..ace8ed56 100644 --- a/tests/aws/mod.rs +++ b/tests/aws/mod.rs @@ -1,4 +1,5 @@ mod aws_databases; mod aws_environment; mod aws_kubernetes; +mod aws_s3; mod aws_whole_enchilada;