feat: being able to deploy an application on scaleway (#309)

This CL adds support for deploying applications on a Scaleway cluster.

Tickets: DEV-937 DEV-936
This commit is contained in:
Benjamin
2021-08-06 16:11:06 +02:00
committed by GitHub
parent 213a667539
commit 15f5d9307d
37 changed files with 1442 additions and 231 deletions

5
Cargo.lock generated
View File

@@ -140,7 +140,9 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279"
dependencies = [
"lazy_static",
"memchr",
"regex-automata",
]
[[package]]
@@ -3126,6 +3128,8 @@ dependencies = [
name = "test-utilities"
version = "0.1.0"
dependencies = [
"base64 0.13.0",
"bstr",
"chrono",
"curl",
"digitalocean",
@@ -3136,6 +3140,7 @@ dependencies = [
"rand 0.7.3",
"reqwest 0.10.10",
"retry",
"scaleway_api_rs",
"serde",
"serde_derive",
"serde_json",

View File

@@ -11,12 +11,15 @@ controller:
maxUnavailable: 1
autoscaling:
enabled: true
publishService:
enabled: true
service:
# https://github.com/scaleway/scaleway-cloud-controller-manager/blob/master/docs/loadbalancer-annotations.md
annotations:
service.beta.kubernetes.io/scw-loadbalancer-forward-port-algorithm: roundrobin
service.beta.kubernetes.io/scw-loadbalancer-protocol-http: false
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v1: false
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: false
service.beta.kubernetes.io/scw-loadbalancer-protocol-http: "false"
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v1: "false"
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "false"
service.beta.kubernetes.io/scw-loadbalancer-health-check-type: tcp
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
externalTrafficPolicy: "Local"

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,7 @@
apiVersion: v2
name: qovery
description: A Qovery Helm chart for Kubernetes deployments
type: application
version: 0.2.0
appVersion: {{ helm_app_version }}
icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png

View File

@@ -0,0 +1,88 @@
{%- if not is_storage %}
---
# Deployment used for stateless applications (applications that declare
# persistent storage are rendered by the StatefulSet template instead).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ sanitized_name }}
  namespace: {{ namespace }}
  labels:
    ownerId: {{ owner_id }}
    envId: {{ environment_id }}
    appId: {{ id }}
    app: {{ sanitized_name }}
  annotations:
    releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %}
spec:
  replicas: {{ total_instances }}
  strategy:
    type: RollingUpdate
    {% if total_instances == 1 %}
    # With a single replica, allow a surge pod so the rollout does not have
    # to delete the only pod before starting the new one.
    rollingUpdate:
      maxSurge: 1
    {% endif %}
  selector:
    matchLabels:
      ownerId: {{ owner_id }}
      envId: {{ environment_id }}
      appId: {{ id }}
  template:
    metadata:
      labels:
        ownerId: {{ owner_id }}
        envId: {{ environment_id }}
        appId: {{ id }}
        app: {{ sanitized_name }}
      annotations:
        # Forces a rolling restart whenever the rendered secret changes.
        checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %}
    spec:
      affinity:
        podAntiAffinity:
          # Spread replicas across nodes.
          # BUGFIX: the original matched key "app" against {{ id }}, but pods
          # are labelled app=<sanitized_name> and appId=<id>, so the selector
          # could never match and the rule was a no-op. Match on appId instead.
          # NOTE(review): a *required* anti-affinity rule makes replica counts
          # greater than the node count unschedulable — confirm this is
          # intended, or relax to preferredDuringSchedulingIgnoredDuringExecution.
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "appId"
                    operator: In
                    values:
                      - {{ id }}
              topologyKey: "kubernetes.io/hostname"
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 60
      securityContext: {}
      {%- if is_registry_secret %}
      imagePullSecrets:
        - name: {{ registry_secret_name }}
      {%- endif %}
      containers:
        - name: {{ sanitized_name }}
          image: "{{ image_name_with_tag }}"
          env:
            # Each environment variable is read from the application secret.
            {%- for ev in environment_variables %}
            - name: "{{ ev.key }}"
              valueFrom:
                secretKeyRef:
                  name: {{ sanitized_name }}
                  key: {{ ev.key }}
            {%- endfor %}
          {%- if private_port %}
          ports:
            - containerPort: {{ private_port }}
              protocol: TCP
          # TCP-based probes on the application's private port.
          readinessProbe:
            tcpSocket:
              port: {{ private_port }}
            initialDelaySeconds: {{ start_timeout_in_seconds }}
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: {{ private_port }}
            initialDelaySeconds: {{ start_timeout_in_seconds }}
            periodSeconds: 20
          {%- endif %}
          resources:
            limits:
              cpu: {{ cpu_burst }}
              memory: {{ total_ram_in_mib }}Mi
            requests:
              cpu: {{ total_cpus }}
              memory: {{ total_ram_in_mib }}Mi
{%- endif %}

View File

@@ -0,0 +1,53 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ sanitized_name }}-default
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
# Deny all ingress by default to this application
podSelector:
matchLabels:
appId: {{ id }}
app: {{ sanitized_name }}
ownerId: {{ owner_id }}
envId: {{ environment_id }}
policyTypes:
- Ingress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ sanitized_name }}-app-access
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
# Then allow some ingress to this application
podSelector:
matchLabels:
appId: {{ id }}
app: {{ sanitized_name }}
ownerId: {{ owner_id }}
envId: {{ environment_id }}
ingress:
# Allow ingress from same environment
- from:
- podSelector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
# Allow ingress from everywhere but only to application port
{% if is_private_port %}
- ports:
- port: {{ private_port }}
{% endif %}

View File

@@ -0,0 +1,21 @@
{%- if not is_storage %}
---
# PodDisruptionBudget keeping at least one replica of the application up
# during voluntary disruptions (e.g. node drains).
# NOTE(review): policy/v1beta1 is deprecated (PDB graduated to policy/v1 in
# Kubernetes 1.21; v1beta1 removed in 1.25) — confirm the target cluster version.
# NOTE(review): with replicas == 1, minAvailable: 1 blocks any voluntary
# eviction of the single pod — confirm this is intended.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
spec:
minAvailable: 1
selector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
{%- endif %}

View File

@@ -0,0 +1,31 @@
---
# Secret holding the application's environment variables; the workload
# templates reference it via secretKeyRef and hash it into the
# checksum/config annotation to trigger rolling restarts on change.
apiVersion: v1
kind: Secret
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
type: Opaque
stringData:
{%- for ev in environment_variables %}
{{ ev.key }}: |-
{{ ev.value }}
{%- endfor %}
---
# Docker registry pull secret referenced by imagePullSecrets as
# "container-registry-token" in the Deployment/StatefulSet templates.
# NOTE(review): this document is rendered unconditionally — when
# container_registry_docker_json_config is empty, .dockerconfigjson is not a
# valid docker config; consider guarding this document with
# is_registry_secret. TODO confirm.
apiVersion: v1
kind: Secret
metadata:
name: container-registry-token
namespace: {{ namespace }}
labels:
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
data:
.dockerconfigjson: {{ container_registry_docker_json_config }}
type: kubernetes.io/dockerconfigjson

View File

@@ -0,0 +1,23 @@
{%- if is_private_port %}
# ClusterIP service exposing the application's private port inside the
# cluster; only rendered when the application declares a private port.
apiVersion: v1
kind: Service
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
type: ClusterIP
ports:
- protocol: TCP
port: {{ private_port }}
targetPort: {{ private_port }}
selector:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
{% endif %}

View File

@@ -0,0 +1,129 @@
{%- if is_storage %}
---
# StatefulSet used for applications that declare persistent storage
# (stateless applications use the Deployment template instead).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ sanitized_name }}
  namespace: {{ namespace }}
  labels:
    ownerId: {{ owner_id }}
    envId: {{ environment_id }}
    appId: {{ id }}
    app: {{ sanitized_name }}
  annotations:
    releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %}
spec:
  replicas: {{ total_instances }}
  serviceName: {{ sanitized_name }}
  selector:
    matchLabels:
      ownerId: {{ owner_id }}
      envId: {{ environment_id }}
      appId: {{ id }}
      app: {{ sanitized_name }}
  template:
    metadata:
      labels:
        ownerId: {{ owner_id }}
        envId: {{ environment_id }}
        appId: {{ id }}
        app: {{ sanitized_name }}
      annotations:
        # Forces a rolling restart whenever the rendered secret changes.
        checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %}
    spec:
      affinity:
        podAntiAffinity:
          # Spread replicas across nodes.
          # BUGFIX: the original matched key "app" against {{ id }}, but pods
          # are labelled app=<sanitized_name> and appId=<id>, so the selector
          # could never match and the rule was a no-op. Match on appId instead.
          # NOTE(review): a *required* anti-affinity rule makes replica counts
          # greater than the node count unschedulable — confirm this is
          # intended, or relax to preferredDuringSchedulingIgnoredDuringExecution.
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "appId"
                    operator: In
                    values:
                      - {{ id }}
              topologyKey: "kubernetes.io/hostname"
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 60
      securityContext: {}
      {%- if is_registry_secret %}
      imagePullSecrets:
        - name: {{ registry_secret_name }}
      {%- endif %}
      containers:
        - name: {{ sanitized_name }}
          image: "{{ image_name_with_tag }}"
          env:
            # Each environment variable is read from the application secret.
            {%- for ev in environment_variables %}
            - name: "{{ ev.key }}"
              valueFrom:
                secretKeyRef:
                  name: {{ sanitized_name }}
                  key: {{ ev.key }}
            {%- endfor %}
          {%- if private_port %}
          ports:
            - containerPort: {{ private_port }}
              protocol: TCP
          # TCP-based probes on the application's private port.
          readinessProbe:
            tcpSocket:
              port: {{ private_port }}
            initialDelaySeconds: {{ start_timeout_in_seconds }}
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: {{ private_port }}
            initialDelaySeconds: {{ start_timeout_in_seconds }}
            periodSeconds: 20
          {%- endif %}
          resources:
            limits:
              cpu: {{ cpu_burst }}
              memory: {{ total_ram_in_mib }}Mi
            requests:
              cpu: {{ total_cpus }}
              memory: {{ total_ram_in_mib }}Mi
          volumeMounts:
            {%- for s in storage %}
            - name: {{ s.id }}
              mountPath: {{ s.mount_point }}
            {%- endfor %}
  volumeClaimTemplates:
    {%- for s in storage %}
    {% if clone %}
    - metadata:
        name: {{ s.id }}
        labels:
          ownerId: {{ owner_id }}
          envId: {{ environment_id }}
          appId: {{ id }}
          app: {{ sanitized_name }}
          diskId: {{ s.id }}
          diskType: {{ s.storage_type }}
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: {{ s.storage_type }}
        # Clone the existing claim with the same id.
        dataSource:
          name: {{ s.id }}
          kind: PersistentVolumeClaim
        resources:
          requests:
            # BUGFIX: the loop variable is `s`; the original referenced the
            # undefined `disk.size_in_gib` (the non-clone branch below already
            # uses s.size_in_gib correctly).
            storage: {{ s.size_in_gib }}Gi
    {% else %}
    - metadata:
        name: {{ s.id }}
        labels:
          ownerId: {{ owner_id }}
          envId: {{ environment_id }}
          appId: {{ id }}
          diskId: {{ s.id }}
          diskType: {{ s.storage_type }}
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: {{ s.storage_type }}
        resources:
          requests:
            storage: {{ s.size_in_gib }}Gi
    {%- endif %}
    {%- endfor %}
{%- endif %}

View File

@@ -0,0 +1,2 @@
# Don't add anything here:
# these charts are rendered with Jinja2 instead of Go templates, so Helm helper templates are intentionally left empty.

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: qovery
description: A Qovery Helm chart for Kubernetes deployments
type: application
version: 0.2.0
icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png

View File

@@ -0,0 +1,20 @@
{%- if custom_domains|length > 0 %}
---
# Per-router ACME (Let's Encrypt) issuer, only created when custom domains
# are attached; certificates are solved via HTTP-01 through the
# nginx-qovery ingress class.
# NOTE(review): cert-manager.io/v1alpha2 is deprecated (removed in
# cert-manager 1.6) — confirm the installed cert-manager version.
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: {{ id }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
spec:
acme:
server: {{ spec_acme_server }}
email: {{ spec_acme_email }}
privateKeySecretRef:
name: acme-{{ id }}-key
solvers:
- http01:
ingress:
class: nginx-qovery
{%- endif %}

View File

@@ -0,0 +1,59 @@
{%- if routes|length >= 1 %}
---
# Ingress routing the router's default domain and any custom domains to the
# environment's application services through the nginx-qovery controller.
# NOTE(review): networking.k8s.io/v1beta1 Ingress is deprecated and removed
# in Kubernetes 1.22 — confirm the target cluster version, or migrate to
# networking.k8s.io/v1 (which changes the backend/pathType schema).
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
routerName: {{ sanitized_name }}
routerId: {{ id }}
envId: {{ environment_id }}
fqdn: "{{ router_default_domain }}"
annotations:
external-dns.alpha.kubernetes.io/hostname: {{ router_default_domain }}
external-dns.alpha.kubernetes.io/ttl: "300"
kubernetes.io/tls-acme: "true"
{%- if custom_domains|length > 0 %}
# Custom domains use the per-router ACME issuer; otherwise fall back to the
# cluster-wide issuer.
cert-manager.io/issuer: {{ id }}
{%- else %}
cert-manager.io/cluster-issuer: {{ metadata_annotations_cert_manager_cluster_issuer }}
{%- endif %}
kubernetes.io/ingress.class: "nginx-qovery"
ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,x-csrftoken"
spec:
tls:
{%- if custom_domains|length > 0 %}
- secretName: "router-tls-{{ id }}"
hosts:
{%- for domain in custom_domains %}
- "{{ domain.domain }}"
{%- endfor %}
{%- endif %}
# We don't use a secret name for the default domain: we rely on the ingress
# controller's default TLS certificate, which carries our wildcard
# certificate https://cert-manager.io/next-docs/faq/kubed/
rules:
- host: "{{ router_default_domain }}"
http:
paths:
{%- for route in routes %}
- path: "{{ route.path }}"
backend:
serviceName: "{{ route.application_name }}"
servicePort: {{ route.application_port }}
{%- endfor %}
{%- for domain in custom_domains %}
- host: "{{ domain.domain }}"
http:
paths:
{%- for route in routes %}
- path: "{{ route.path }}"
backend:
serviceName: "{{ route.application_name }}"
servicePort: {{ route.application_port }}
{%- endfor %}
{%- endfor %}
{%- endif %}

View File

@@ -0,0 +1,2 @@
# Don't add anything here:
# these charts are rendered with Jinja2 instead of Go templates, so Helm helper templates are intentionally left empty.

View File

@@ -61,6 +61,8 @@ pub struct Image {
pub commit_id: String,
// registry name where the image has been pushed: Optional
pub registry_name: Option<String>,
// registry docker json config: Optional
pub registry_docker_json_config: Option<String>,
// registry secret to pull image: Optional
pub registry_secret: Option<String>,
// complete registry URL where the image has been pushed

View File

@@ -70,7 +70,7 @@ impl Application {
}
fn is_stateful(&self) -> bool {
self.storage.len() > 0
!self.storage.is_empty()
}
}

View File

@@ -9,12 +9,13 @@ use crate::cloud_provider::models::{
};
use crate::cloud_provider::service::{
default_tera_context, delete_stateless_service, deploy_stateless_service_error, deploy_user_stateless_service,
send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm, Pause, Service, ServiceType,
StatelessService,
scale_down_application, send_progress_on_long_task, Action, Application as CApplication, Create, Delete, Helm,
Pause, Service, ServiceType, StatelessService,
};
use crate::cloud_provider::utilities::{sanitize_name, validate_k8s_required_cpu_and_burstable};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::cmd::kubectl::ScalingKind::{Deployment, Statefulset};
use crate::error::EngineErrorCause::Internal;
use crate::error::{EngineError, EngineErrorScope};
use crate::models::{Context, Listen, Listener, Listeners, ListenersHelper};
@@ -70,6 +71,10 @@ impl Application {
listeners,
}
}
fn is_stateful(&self) -> bool {
!self.storage.is_empty()
}
}
impl crate::cloud_provider::service::Application for Application {
@@ -167,7 +172,10 @@ impl Service for Application {
context.insert("helm_app_version", &commit_id[..7]);
match &self.image().registry_url {
Some(registry_url) => context.insert("image_name_with_tag", registry_url.as_str()),
Some(registry_url) => context.insert(
"image_name_with_tag",
format!("{}/{}", registry_url.as_str(), self.image().name_with_tag()).as_str(),
),
None => {
let image_name_with_tag = self.image().name_with_tag();
warn!(
@@ -190,9 +198,9 @@ impl Service for Application {
context.insert("environment_variables", &environment_variables);
match self.image.registry_name.as_ref() {
Some(registry_name) => {
Some(_) => {
context.insert("is_registry_secret", &true);
context.insert("registry_secret", registry_name);
context.insert("registry_secret_name", "container-registry-token");
}
None => {
context.insert("is_registry_secret", &false);
@@ -225,7 +233,10 @@ impl Service for Application {
id: s.id.clone(),
name: s.name.clone(),
storage_type: match s.storage_type {
StorageType::BlockSsd => "b_ssd",
// TODO(benjaminch): Switch to proper storage class
// Note: Seems volume storage type are not supported, only blocked storage for the time being
// https://github.com/scaleway/scaleway-csi/tree/master/examples/kubernetes#different-storageclass
StorageType::BlockSsd => "scw-sbv-ssd-0", // "b_ssd",
StorageType::LocalSsd => "l_ssd",
}
.to_string(),
@@ -249,6 +260,16 @@ impl Service for Application {
)
}
// container registry credentials
context.insert(
"container_registry_docker_json_config",
self.image
.clone()
.registry_docker_json_config
.unwrap_or("".to_string())
.as_str(),
);
Ok(context)
}
@@ -291,11 +312,14 @@ impl Pause for Application {
fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
info!("SCW.application.on_pause() called for {}", self.name());
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Pause,
Box::new(|| delete_stateless_service(target, self, false)),
)
send_progress_on_long_task(self, crate::cloud_provider::service::Action::Pause, || {
scale_down_application(
target,
self,
0,
if self.is_stateful() { Statefulset } else { Deployment },
)
})
}
fn on_pause_check(&self) -> Result<(), EngineError> {

View File

@@ -2,13 +2,13 @@ use tera::Context as TeraContext;
use crate::cloud_provider::models::{CustomDomain, CustomDomainDataTemplate, Route, RouteDataTemplate};
use crate::cloud_provider::service::{
default_tera_context, delete_stateless_service, send_progress_on_long_task, Action, Create, Delete, Helm, Pause,
Service, ServiceType, StatelessService,
default_tera_context, delete_router, delete_stateless_service, send_progress_on_long_task, Action, Create, Delete,
Helm, Pause, Service, ServiceType, StatelessService,
};
use crate::cloud_provider::utilities::sanitize_name;
use crate::cloud_provider::utilities::{check_cname_for, sanitize_name};
use crate::cloud_provider::DeploymentTarget;
use crate::cmd::helm::Timeout;
use crate::error::{EngineError, EngineErrorScope};
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause, EngineErrorScope};
use crate::models::{Context, Listen, Listener, Listeners};
pub struct Router {
@@ -104,7 +104,7 @@ impl Service for Router {
DeploymentTarget::ManagedServices(k, env) => (*k, *env),
DeploymentTarget::SelfHosted(k, env) => (*k, *env),
};
let context = default_tera_context(self, kubernetes, environment);
let mut context = default_tera_context(self, kubernetes, environment);
let applications = environment
.stateless_services
@@ -112,7 +112,7 @@ impl Service for Router {
.filter(|x| x.service_type() == ServiceType::Application)
.collect::<Vec<_>>();
let _custom_domain_data_templates = self
let custom_domain_data_templates = self
.custom_domains
.iter()
.map(|cd| {
@@ -125,7 +125,7 @@ impl Service for Router {
})
.collect::<Vec<_>>();
let _route_data_templates = self
let route_data_templates = self
.routes
.iter()
.map(|r| {
@@ -145,6 +145,23 @@ impl Service for Router {
.map(|x| x.unwrap())
.collect::<Vec<_>>();
let router_default_domain_hash = crate::crypto::to_sha1_truncate_16(self.default_domain.as_str());
let tls_domain = format!("*.{}", kubernetes.dns_provider().domain());
context.insert("router_tls_domain", tls_domain.as_str());
context.insert("router_default_domain", self.default_domain.as_str());
context.insert("router_default_domain_hash", router_default_domain_hash.as_str());
context.insert("custom_domains", &custom_domain_data_templates);
context.insert("routes", &route_data_templates);
context.insert("spec_acme_email", "tls@qovery.com"); // TODO CHANGE ME
context.insert("metadata_annotations_cert_manager_cluster_issuer", "letsencrypt-qovery");
let lets_encrypt_url = match self.context.is_test_cluster() {
true => "https://acme-staging-v02.api.letsencrypt.org/directory",
false => "https://acme-v02.api.letsencrypt.org/directory",
};
context.insert("spec_acme_server", lets_encrypt_url);
Ok(context)
}
@@ -206,19 +223,44 @@ impl StatelessService for Router {}
impl Create for Router {
fn on_create(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
info!("Scaleway.router.on_create() called for {}", self.name());
let (kubernetes, _environment) = match target {
let (kubernetes, environment) = match target {
DeploymentTarget::ManagedServices(k, env) => (*k, *env),
DeploymentTarget::SelfHosted(k, env) => (*k, *env),
};
let _workspace_dir = self.workspace_directory();
let _helm_release_name = self.helm_release_name();
let workspace_dir = self.workspace_directory();
let helm_release_name = self.helm_release_name();
let _kubernetes_config_file_path = kubernetes.config_file_path()?;
let kubernetes_config_file_path = kubernetes.config_file_path()?;
// respect order - getting the context here and not before is mandatory
// the nginx-ingress must be available to get the external dns target if necessary
let _context = self.tera_context(target)?;
let context = self.tera_context(target)?;
let from_dir = format!("{}/scaleway/charts/q-ingress-tls", self.context.lib_root_dir());
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::template::generate_and_copy_all_files_into_dir(from_dir.as_str(), workspace_dir.as_str(), &context),
)?;
// do exec helm upgrade and return the last deployment status
let helm_history_row = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::cmd::helm::helm_exec_with_upgrade_history(
kubernetes_config_file_path.as_str(),
environment.namespace(),
helm_release_name.as_str(),
workspace_dir.as_str(),
Timeout::Default,
kubernetes.cloud_provider().credentials_environment_variables(),
),
)?;
if helm_history_row.is_none() || !helm_history_row.unwrap().is_successfully_deployed() {
return Err(self.engine_error(EngineErrorCause::Internal, "Router has failed to be deployed".into()));
}
Ok(())
}
@@ -229,6 +271,26 @@ impl Create for Router {
// check non custom domains
self.check_domains()?;
// Wait/Check that custom domain is a CNAME targeting qovery
for domain_to_check in self.custom_domains.iter() {
match check_cname_for(
self.progress_scope(),
self.listeners(),
&domain_to_check.domain,
self.context.execution_id(),
) {
Ok(cname) if cname.trim_end_matches('.') == domain_to_check.target_domain.trim_end_matches('.') => {
continue
}
Ok(err) | Err(err) => {
warn!(
"Invalid CNAME for {}. Might not be an issue if user is using a CDN: {}",
domain_to_check.domain, err
);
}
}
}
Ok(())
}
@@ -244,40 +306,25 @@ impl Create for Router {
}
impl Pause for Router {
fn on_pause(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> {
info!("SCW.router.on_pause() called for {}", self.name());
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Pause,
Box::new(|| delete_stateless_service(target, self, false)),
)
Ok(())
}
fn on_pause_check(&self) -> Result<(), EngineError> {
Ok(())
}
fn on_pause_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
fn on_pause_error(&self, _target: &DeploymentTarget) -> Result<(), EngineError> {
warn!("SCW.router.on_pause_error() called for {}", self.name());
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Pause,
Box::new(|| delete_stateless_service(target, self, true)),
)
Ok(())
}
}
impl Delete for Router {
fn on_delete(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
info!("SCW.router.on_delete() called for {}", self.name());
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Delete,
Box::new(|| delete_stateless_service(target, self, false)),
)
delete_router(target, self, false)
}
fn on_delete_check(&self) -> Result<(), EngineError> {
@@ -286,11 +333,6 @@ impl Delete for Router {
fn on_delete_error(&self, target: &DeploymentTarget) -> Result<(), EngineError> {
warn!("SCW.router.on_delete_error() called for {}", self.name());
send_progress_on_long_task(
self,
crate::cloud_provider::service::Action::Delete,
Box::new(|| delete_stateless_service(target, self, true)),
)
delete_router(target, self, true)
}
}

View File

@@ -5,3 +5,4 @@ pub const KUBECONFIG: &str = "KUBECONFIG";
pub const DIGITAL_OCEAN_TOKEN: &str = "DIGITAL_OCEAN_TOKEN";
pub const SCALEWAY_ACCESS_KEY: &str = "SCALEWAY_ACCESS_KEY";
pub const SCALEWAY_SECRET_KEY: &str = "SCALEWAY_SECRET_KEY";
pub const SCALEWAY_DEFAULT_PROJECT_ID: &str = "SCALEWAY_DEFAULT_PROJECT_ID";

View File

@@ -43,7 +43,7 @@ pub struct PushResult {
pub image: Image,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum Kind {
DockerHub,

View File

@@ -11,6 +11,9 @@ use crate::models::{
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
};
use crate::runtime::block_on;
use retry::delay::Fixed;
use retry::Error::Operation;
use retry::OperationResult;
pub struct ScalewayCR {
context: Context,
@@ -105,7 +108,7 @@ impl ScalewayCR {
None,
Some(image.name.as_str()),
None,
Some(image.registry_name.as_ref().unwrap_or(&"".to_string()).as_str()),
Some(self.default_project_id.as_str()),
)) {
Ok(res) => res.images,
Err(e) => {
@@ -169,16 +172,35 @@ impl ScalewayCR {
image.tag.clone(),
image_url.clone(),
) {
Ok(_) => {
let mut image = image.clone();
image.registry_url = Some(image_url);
Ok(PushResult { image })
Ok(_) => {}
Err(e) => {
return Err(self.engine_error(
EngineErrorCause::Internal,
e.message
.unwrap_or_else(|| "unknown error occurring during docker push".to_string()),
))
}
Err(e) => Err(self.engine_error(
EngineErrorCause::Internal,
e.message
.unwrap_or_else(|| "unknown error occurring during docker push".to_string()),
)),
};
let result = retry::retry(Fixed::from_millis(10000).take(12), || {
match self.does_image_exists(&image) {
true => OperationResult::Ok(&image),
false => {
warn!("image is not yet available on Scaleway Registry Namespace, retrying in a few seconds...");
OperationResult::Retry(())
}
}
});
let image_not_reachable = Err(self.engine_error(
EngineErrorCause::Internal,
"image has been pushed on Scaleway Registry Namespace but is not yet available after 2min. Please try to redeploy in a few minutes".to_string(),
));
match result {
Ok(_) => Ok(PushResult { image: image.clone() }),
Err(Operation { .. }) => image_not_reachable,
Err(retry::Error::Internal(_)) => image_not_reachable,
}
}
@@ -260,6 +282,17 @@ impl ScalewayCR {
self.create_registry_namespace(&image)
}
fn get_docker_json_config_raw(&self) -> String {
base64::encode(
format!(
r#"{{"auths":{{"rg.{}.scw.cloud":{{"auth":"{}"}}}}}}"#,
self.region.as_str(),
base64::encode(format!("nologin:{}", self.secret_token).as_bytes())
)
.as_bytes(),
)
}
}
impl ContainerRegistry for ScalewayCR {
@@ -308,17 +341,18 @@ impl ContainerRegistry for ScalewayCR {
let registry_url: String;
let registry_name: String;
match self.create_registry_namespace(&image) {
match self.get_or_create_registry_namespace(&image) {
Ok(registry) => {
info!(
"Scaleway registry namespace for {} has been created",
image.name.as_str()
);
image.registry_name = registry.name.clone();
image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name
image.registry_url = registry.endpoint.clone();
image.registry_secret = Some(self.secret_token.clone());
registry_url = image.registry_url.clone().unwrap_or("undefined".to_string());
registry_name = registry.clone().name.unwrap();
image.registry_docker_json_config = Some(self.get_docker_json_config_raw());
registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string());
registry_name = registry.name.unwrap();
}
Err(e) => {
error!(

View File

@@ -17,7 +17,7 @@ pub mod fs;
pub mod git;
pub mod models;
pub mod object_storage;
mod runtime;
pub mod runtime;
pub mod session;
mod string;
mod template;

View File

@@ -328,6 +328,7 @@ impl Application {
registry_name: None,
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
}
}
@@ -837,6 +838,7 @@ impl ExternalService {
registry_name: None,
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
}
}

View File

@@ -413,7 +413,7 @@ mod tests {
let result = ScalewayOS::is_bucket_name_valid(tc.bucket_name_input);
// verify:
assert_eq!(tc.expected_output, result);
assert_eq!(tc.expected_output, result, "{}", tc.description);
}
}
}

View File

@@ -137,11 +137,13 @@ dependencies = [
[[package]]
name = "bstr"
version = "0.2.14"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf"
checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279"
dependencies = [
"lazy_static",
"memchr",
"regex-automata",
]
[[package]]
@@ -1391,9 +1393,9 @@ dependencies = [
[[package]]
name = "js-sys"
version = "0.3.46"
version = "0.3.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175"
checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062"
dependencies = [
"wasm-bindgen",
]
@@ -1556,9 +1558,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "memchr"
version = "2.3.4"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc"
[[package]]
name = "memoffset"
@@ -1764,15 +1766,15 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
[[package]]
name = "openssl"
version = "0.10.32"
version = "0.10.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70"
checksum = "549430950c79ae24e6d02e0b7404534ecf311d94cc9f861e9e4020187d13d885"
dependencies = [
"bitflags",
"cfg-if 1.0.0",
"foreign-types",
"lazy_static",
"libc",
"once_cell",
"openssl-sys",
]
@@ -1784,9 +1786,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
[[package]]
name = "openssl-sys"
version = "0.9.60"
version = "0.9.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6"
checksum = "7a7907e3bfa08bb85105209cdfcb6c63d109f8f6c1ed6ca318fff5c1853fbc1d"
dependencies = [
"autocfg 1.0.1",
"cc",
@@ -3156,6 +3158,8 @@ dependencies = [
name = "test-utilities"
version = "0.1.0"
dependencies = [
"base64 0.13.0",
"bstr",
"chrono",
"curl",
"digitalocean",
@@ -3166,6 +3170,7 @@ dependencies = [
"rand 0.7.3",
"reqwest 0.10.8",
"retry",
"scaleway_api_rs",
"serde",
"serde_derive",
"serde_json",
@@ -3809,9 +3814,9 @@ dependencies = [
[[package]]
name = "vcpkg"
version = "0.2.11"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "version_check"
@@ -3871,9 +3876,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "wasm-bindgen"
version = "0.2.69"
version = "0.2.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e"
checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd"
dependencies = [
"cfg-if 1.0.0",
"serde",
@@ -3883,9 +3888,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.69"
version = "0.2.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62"
checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900"
dependencies = [
"bumpalo",
"lazy_static",
@@ -3910,9 +3915,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.69"
version = "0.2.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084"
checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4"
dependencies = [
"quote 1.0.8",
"wasm-bindgen-macro-support",
@@ -3920,9 +3925,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.69"
version = "0.2.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549"
checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97"
dependencies = [
"proc-macro2 1.0.27",
"quote 1.0.8",
@@ -3933,15 +3938,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.69"
version = "0.2.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158"
checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f"
[[package]]
name = "web-sys"
version = "0.3.46"
version = "0.3.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3"
checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582"
dependencies = [
"js-sys",
"wasm-bindgen",

View File

@@ -7,6 +7,8 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
base64 = "0.13.0"
bstr = "0.2.16"
qovery-engine = { path = "../" }
chrono = "0.4.11"
dirs = "3.0.1"
@@ -25,3 +27,6 @@ hashicorp_vault = "2.0.1"
# Digital Ocean Deps
digitalocean = "0.1.1"
# Scaleway Deps
scaleway_api_rs = "0.1.1"

View File

@@ -36,8 +36,8 @@ pub fn container_registry_scw(context: &Context) -> ScalewayCR {
ScalewayCR::new(
context.clone(),
format!("default-ecr-registry-qovery-test-{}", random_id.clone()).as_str(),
format!("default-ecr-registry-qovery-test-{}", random_id.clone()).as_str(),
format!("default-registry-qovery-test-{}", random_id.clone()).as_str(),
format!("default-registry-qovery-test-{}", random_id.clone()).as_str(),
scw_secret_key.as_str(),
scw_default_project_id.as_str(),
SCW_TEST_REGION,

View File

@@ -1,31 +1,40 @@
extern crate base64;
extern crate bstr;
extern crate scaleway_api_rs;
use bstr::ByteSlice;
use chrono::Utc;
use curl::easy::Easy;
use dirs::home_dir;
use gethostname;
use std::fs::read_to_string;
use std::fs::File;
use std::io::{Error, ErrorKind, Write};
use std::path::Path;
use std::str::FromStr;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use retry::delay::Fibonacci;
use retry::OperationResult;
use std::env;
use std::os::unix::fs::PermissionsExt;
use tracing::{info, warn};
use std::fs;
use tracing::{error, info, warn};
use tracing_subscriber;
use crate::aws::KUBE_CLUSTER_ID;
use hashicorp_vault;
use qovery_engine::build_platform::local_docker::LocalDocker;
use qovery_engine::cloud_provider::scaleway::application::Region;
use qovery_engine::cloud_provider::Kind;
use qovery_engine::cmd;
use qovery_engine::constants::{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY};
use qovery_engine::constants::{
AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, SCALEWAY_ACCESS_KEY, SCALEWAY_DEFAULT_PROJECT_ID, SCALEWAY_SECRET_KEY,
};
use qovery_engine::error::{SimpleError, SimpleErrorKind};
use qovery_engine::models::{Context, Environment, Features, Metadata};
use serde::{Deserialize, Serialize};
extern crate time;
use crate::scaleway::SCW_TEST_CLUSTER_ID;
use qovery_engine::cmd::structs::{KubernetesList, KubernetesPod};
use qovery_engine::runtime::block_on;
use time::Instant;
pub fn context() -> Context {
@@ -369,50 +378,139 @@ fn curl_path(path: &str) -> bool {
}
fn kubernetes_config_path(
provider_kind: Kind,
workspace_directory: &str,
kubernetes_cluster_id: &str,
access_key_id: &str,
secret_access_key: &str,
secrets: FuncTestsSecrets,
) -> Result<String, SimpleError> {
let kubernetes_config_bucket_name = format!("qovery-kubeconfigs-{}", kubernetes_cluster_id);
let kubernetes_config_object_key = format!("{}.yaml", kubernetes_cluster_id);
let kubernetes_config_file_path = format!("{}/kubernetes_config_{}", workspace_directory, kubernetes_cluster_id);
let _ = get_kubernetes_config_file(
access_key_id,
secret_access_key,
kubernetes_config_bucket_name.as_str(),
kubernetes_config_object_key.as_str(),
kubernetes_config_file_path.as_str(),
provider_kind,
kubernetes_config_bucket_name,
kubernetes_config_object_key,
kubernetes_config_file_path.clone(),
secrets.clone(),
)?;
Ok(kubernetes_config_file_path)
}
fn get_kubernetes_config_file<P>(
access_key_id: &str,
secret_access_key: &str,
kubernetes_config_bucket_name: &str,
kubernetes_config_object_key: &str,
provider_kind: Kind,
kubernetes_config_bucket_name: String,
kubernetes_config_object_key: String,
file_path: P,
) -> Result<File, SimpleError>
secrets: FuncTestsSecrets,
) -> Result<fs::File, SimpleError>
where
P: AsRef<Path>,
{
// return the file if it already exists
let _ = match File::open(file_path.as_ref()) {
// return the file if it already exists and should use cache
let _ = match fs::File::open(file_path.as_ref()) {
Ok(f) => return Ok(f),
Err(_) => {}
};
let file_content_result = retry::retry(Fibonacci::from_millis(3000).take(5), || {
let file_content = get_object_via_aws_cli(
access_key_id,
secret_access_key,
kubernetes_config_bucket_name,
kubernetes_config_object_key,
);
let file_content = match provider_kind {
Kind::Aws => {
let access_key_id = secrets.clone().AWS_ACCESS_KEY_ID.unwrap();
let secret_access_key = secrets.clone().AWS_SECRET_ACCESS_KEY.unwrap();
aws_s3_get_object(
access_key_id.as_str(),
secret_access_key.as_str(),
kubernetes_config_bucket_name.as_str(),
kubernetes_config_object_key.as_str(),
)
}
Kind::Do => todo!(),
Kind::Scw => {
// TODO(benjaminch): refactor all of this properly
let region = Region::from_str(secrets.clone().SCALEWAY_DEFAULT_REGION.unwrap().as_str()).unwrap();
let project_id = secrets.clone().SCALEWAY_DEFAULT_PROJECT_ID.unwrap();
let secret_access_key = secrets.clone().SCALEWAY_SECRET_KEY.unwrap();
let configuration = scaleway_api_rs::apis::configuration::Configuration {
api_key: Some(scaleway_api_rs::apis::configuration::ApiKey {
key: secret_access_key.to_string(),
prefix: None,
}),
..scaleway_api_rs::apis::configuration::Configuration::default()
};
let clusters_res = block_on(scaleway_api_rs::apis::clusters_api::list_clusters(
&configuration,
region.to_string().as_str(),
None,
Some(project_id.as_str()),
None,
None,
None,
None,
None,
None,
));
if let Err(e) = clusters_res {
let message = format!("error while trying to get clusters, error: {}", e.to_string());
error!("{}", message);
return OperationResult::Retry(SimpleError::new(SimpleErrorKind::Other, Some(message.as_str())));
}
let clusters = clusters_res.unwrap();
if clusters.clusters.is_none() {
let message = "error while trying to get clusters, error: no clusters found";
error!("{}", message);
return OperationResult::Retry(SimpleError::new(SimpleErrorKind::Other, Some(message)));
}
let clusters = clusters.clusters.unwrap();
let expected_test_server_tag = format!("ClusterId={}", SCW_TEST_CLUSTER_ID);
for cluster in clusters.iter() {
if cluster.tags.is_some() {
for tag in cluster.tags.as_ref().unwrap().iter() {
if tag.as_str() == expected_test_server_tag.as_str() {
match block_on(scaleway_api_rs::apis::clusters_api::get_cluster_kube_config(
&configuration,
region.as_str(),
cluster.id.as_ref().unwrap().as_str(),
)) {
Ok(res) => {
return OperationResult::Ok(
base64::decode(res.content.unwrap())
.unwrap()
.to_str()
.unwrap()
.to_string(),
);
}
Err(e) => {
let message =
format!("error while trying to get clusters, error: {}", e.to_string());
error!("{}", message);
return OperationResult::Retry(SimpleError::new(
SimpleErrorKind::Other,
Some(message.as_str()),
));
}
};
}
}
}
}
Err(SimpleError::new(SimpleErrorKind::Other, Some("Test cluster not found")))
}
};
match file_content {
Ok(file_content) => OperationResult::Ok(file_content),
@@ -430,27 +528,60 @@ where
}
};
let mut kubernetes_config_file = File::create(file_path.as_ref())?;
let mut kubernetes_config_file = fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(file_path.as_ref())?;
let _ = kubernetes_config_file.write_all(file_content.as_bytes())?;
// removes warning kubeconfig is (world/group) readable
let metadata = kubernetes_config_file.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(0o400);
std::fs::set_permissions(file_path.as_ref(), permissions)?;
let mut perms = fs::metadata(file_path.as_ref())?.permissions();
perms.set_readonly(false);
fs::set_permissions(file_path.as_ref(), perms)?;
Ok(kubernetes_config_file)
}
/// gets an aws s3 object using aws-cli
/// used as a failover when rusoto_s3 acts up
fn get_object_via_aws_cli(
type KubernetesCredentials<'a> = Vec<(&'a str, &'a str)>;
fn get_cloud_provider_credentials<'a>(provider_kind: Kind, secrets: &'a FuncTestsSecrets) -> KubernetesCredentials<'a> {
match provider_kind {
Kind::Aws => vec![
(AWS_ACCESS_KEY_ID, secrets.AWS_ACCESS_KEY_ID.as_ref().unwrap().as_str()),
(
AWS_SECRET_ACCESS_KEY,
secrets.AWS_SECRET_ACCESS_KEY.as_ref().unwrap().as_str(),
),
],
Kind::Do => todo!(),
Kind::Scw => vec![
(
SCALEWAY_ACCESS_KEY,
secrets.SCALEWAY_ACCESS_KEY.as_ref().unwrap().as_str(),
),
(
SCALEWAY_SECRET_KEY,
secrets.SCALEWAY_SECRET_KEY.as_ref().unwrap().as_str(),
),
(
SCALEWAY_DEFAULT_PROJECT_ID,
secrets.SCALEWAY_DEFAULT_PROJECT_ID.as_ref().unwrap().as_str(),
),
],
}
}
fn aws_s3_get_object(
access_key_id: &str,
secret_access_key: &str,
bucket_name: &str,
object_key: &str,
) -> Result<String, SimpleError> {
let s3_url = format!("s3://{}/{}", bucket_name, object_key);
let local_path = format!("/tmp/{}", object_key); // FIXME: change hardcoded /tmp/
// gets an aws s3 object using aws-cli
// used as a failover when rusoto_s3 acts up
let s3_url = format!("s3://{}/{}", bucket_name, object_key);
qovery_engine::cmd::utilities::exec(
"aws",
vec!["s3", "cp", &s3_url, &local_path],
@@ -460,11 +591,14 @@ fn get_object_via_aws_cli(
],
)?;
let s = read_to_string(&local_path)?;
let s = fs::read_to_string(&local_path)?;
Ok(s)
}
pub fn is_pod_restarted_aws_env(
pub fn is_pod_restarted_env(
provider_kind: Kind,
kube_cluster_id: &str,
environment_check: Environment,
pod_to_check: &str,
secrets: FuncTestsSecrets,
@@ -475,14 +609,7 @@ pub fn is_pod_restarted_aws_env(
&environment_check.id.clone(),
);
let access_key = secrets.AWS_ACCESS_KEY_ID.unwrap();
let secret_key = secrets.AWS_SECRET_ACCESS_KEY.unwrap();
let aws_credentials_envs = vec![
("AWS_ACCESS_KEY_ID", access_key.as_str()),
("AWS_SECRET_ACCESS_KEY", secret_key.as_str()),
];
let kubernetes_config = kubernetes_config_path("/tmp", KUBE_CLUSTER_ID, access_key.as_str(), secret_key.as_str());
let kubernetes_config = kubernetes_config_path(provider_kind.clone(), "/tmp", kube_cluster_id, secrets.clone());
match kubernetes_config {
Ok(path) => {
@@ -490,7 +617,7 @@ pub fn is_pod_restarted_aws_env(
path.as_str(),
namespace_name.clone().as_str(),
pod_to_check,
aws_credentials_envs,
get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()),
);
match restarted_database {
Ok(count) => match count.trim().eq("0") {
@@ -504,9 +631,11 @@ pub fn is_pod_restarted_aws_env(
}
}
pub fn get_pods_aws(
pub fn get_pods(
provider_kind: Kind,
environment_check: Environment,
pod_to_check: &str,
kube_cluster_id: &str,
secrets: FuncTestsSecrets,
) -> Result<KubernetesList<KubernetesPod>, SimpleError> {
let namespace_name = format!(
@@ -515,20 +644,13 @@ pub fn get_pods_aws(
&environment_check.id.clone(),
);
let access_key = secrets.AWS_ACCESS_KEY_ID.unwrap();
let secret_key = secrets.AWS_SECRET_ACCESS_KEY.unwrap();
let aws_credentials_envs = vec![
("AWS_ACCESS_KEY_ID", access_key.as_str()),
("AWS_SECRET_ACCESS_KEY", secret_key.as_str()),
];
let kubernetes_config = kubernetes_config_path("/tmp", KUBE_CLUSTER_ID, access_key.as_str(), secret_key.as_str());
let kubernetes_config = kubernetes_config_path(provider_kind.clone(), "/tmp", kube_cluster_id, secrets.clone());
cmd::kubectl::kubectl_exec_get_pod(
kubernetes_config.unwrap().as_str(),
namespace_name.clone().as_str(),
pod_to_check,
aws_credentials_envs,
get_cloud_provider_credentials(provider_kind.clone(), &secrets.clone()),
)
}

View File

@@ -3,6 +3,7 @@ extern crate test_utilities;
use test_utilities::utilities::{init, FuncTestsSecrets};
use tracing::{span, Level};
use qovery_engine::cloud_provider::Kind as ProviderKind;
use qovery_engine::models::{
Action, Clone2, Context, Database, DatabaseKind, Environment, EnvironmentAction, EnvironmentVariable, Kind,
};
@@ -10,7 +11,8 @@ use qovery_engine::transaction::TransactionResult;
use crate::aws::aws_environment::{ctx_pause_environment, delete_environment, deploy_environment};
use self::test_utilities::utilities::{context, engine_run_test, generate_id, get_pods_aws, is_pod_restarted_aws_env};
use self::test_utilities::aws::KUBE_CLUSTER_ID;
use self::test_utilities::utilities::{context, engine_run_test, generate_id, get_pods, is_pod_restarted_env};
/**
**
@@ -93,7 +95,13 @@ fn deploy_an_environment_with_db_and_pause_it() {
// Check that we have actually 0 pods running for this db
let app_name = format!("postgresql{}-0", environment.databases[0].name);
let ret = get_pods_aws(environment.clone(), app_name.clone().as_str(), secrets.clone());
let ret = get_pods(
ProviderKind::Aws,
environment.clone(),
app_name.clone().as_str(),
KUBE_CLUSTER_ID,
secrets.clone(),
);
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
@@ -151,7 +159,13 @@ fn postgresql_failover_dev_environment_with_all_options() {
};
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
let database_name = format!("postgresql{}-0", &environment_check.databases[0].name);
match is_pod_restarted_aws_env(environment_check.clone(), database_name.as_str(), secrets.clone()) {
match is_pod_restarted_env(
ProviderKind::Aws,
KUBE_CLUSTER_ID,
environment_check.clone(),
database_name.as_str(),
secrets.clone(),
) {
(true, _) => assert!(true),
(false, _) => assert!(false),
}
@@ -161,7 +175,13 @@ fn postgresql_failover_dev_environment_with_all_options() {
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY EVEN IF FAIL
match is_pod_restarted_aws_env(environment_check.clone(), database_name.as_str(), secrets) {
match is_pod_restarted_env(
ProviderKind::Aws,
KUBE_CLUSTER_ID,
environment_check.clone(),
database_name.as_str(),
secrets,
) {
(true, _) => assert!(true),
(false, _) => assert!(false),
}
@@ -322,7 +342,13 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
};
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
let database_name = format!("postgresql{}-0", &environment_check.databases[0].name);
match is_pod_restarted_aws_env(environment_check, database_name.as_str(), secrets) {
match is_pod_restarted_env(
ProviderKind::Aws,
KUBE_CLUSTER_ID,
environment_check,
database_name.as_str(),
secrets,
) {
(true, _) => assert!(true),
(false, _) => assert!(false),
}

View File

@@ -1,9 +1,9 @@
extern crate test_utilities;
use self::test_utilities::aws::KUBE_CLUSTER_ID;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{
engine_run_test, generate_id, get_pods_aws, is_pod_restarted_aws_env, FuncTestsSecrets,
};
use self::test_utilities::utilities::{engine_run_test, generate_id, get_pods, is_pod_restarted_env, FuncTestsSecrets};
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{Action, Clone2, Context, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use test_utilities::utilities::context;
@@ -133,7 +133,13 @@ fn deploy_a_working_environment_and_pause_it_eks() {
// Check that we have actually 0 pods running for this app
let app_name = format!("{}-0", environment.applications[0].name);
let ret = get_pods_aws(environment.clone(), app_name.clone().as_str(), secrets.clone());
let ret = get_pods(
Kind::Aws,
environment.clone(),
app_name.clone().as_str(),
KUBE_CLUSTER_ID,
secrets.clone(),
);
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
@@ -434,7 +440,13 @@ fn redeploy_same_app_with_ebs() {
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
let app_name = format!("{}-0", &environment_check1.applications[0].name);
let (_, number) = is_pod_restarted_aws_env(environment_check1, app_name.clone().as_str(), secrets.clone());
let (_, number) = is_pod_restarted_env(
Kind::Aws,
KUBE_CLUSTER_ID,
environment_check1,
app_name.clone().as_str(),
secrets.clone(),
);
match deploy_environment(&context_bis, &ea2) {
TransactionResult::Ok => assert!(true),
@@ -442,7 +454,13 @@ fn redeploy_same_app_with_ebs() {
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
let (_, number2) = is_pod_restarted_aws_env(environment_check2, app_name.as_str(), secrets);
let (_, number2) = is_pod_restarted_env(
Kind::Aws,
KUBE_CLUSTER_ID,
environment_check2,
app_name.as_str(),
secrets,
);
//nothing change in the app, so, it shouldn't be restarted
assert!(number.eq(&number2));
match delete_environment(&context_for_deletion, &ea_delete) {

View File

@@ -56,6 +56,7 @@ fn test_get_registry_namespace() {
registry_name: Some(registry_name.to_string()),
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
};
container_registry
@@ -114,6 +115,7 @@ fn test_create_registry_namespace() {
registry_name: Some(registry_name.to_string()),
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
};
// execute:
@@ -165,6 +167,7 @@ fn test_delete_registry_namespace() {
registry_name: Some(registry_name.to_string()),
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
};
container_registry
@@ -211,6 +214,7 @@ fn test_get_or_create_registry_namespace() {
registry_name: Some(registry_name.to_string()),
registry_secret: None,
registry_url: None,
registry_docker_json_config: None,
};
container_registry

View File

@@ -1,23 +1,17 @@
extern crate test_utilities;
use self::test_utilities::cloudflare;
use self::test_utilities::utilities::{engine_run_test, generate_id, FuncTestsSecrets};
use qovery_engine::cloud_provider::scaleway::application::Region;
use self::test_utilities::scaleway::SCW_TEST_CLUSTER_ID;
use self::test_utilities::utilities::{engine_run_test, generate_id, get_pods, is_pod_restarted_env, FuncTestsSecrets};
use qovery_engine::cloud_provider::Kind;
use qovery_engine::models::{Action, Clone2, Context, EnvironmentAction, Storage, StorageType};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use std::str::FromStr;
use test_utilities::utilities::context;
use test_utilities::utilities::init;
use tracing::{span, Level};
// Note: All those tests relies on a test cluster running on Scaleway infrastructure.
// This cluster should be live in order to have those tests passing properly.
pub fn deploy_environment(
context: &Context,
region: Region,
environment_action: EnvironmentAction,
) -> TransactionResult {
pub fn deploy_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = test_utilities::scaleway::docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
@@ -25,7 +19,6 @@ pub fn deploy_environment(
let cp = test_utilities::scaleway::cloud_provider_scaleway(context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let dns_provider = test_utilities::cloudflare::dns_provider_cloudflare(context);
let object_storage = test_utilities::scaleway::scw_object_storage(context.clone(), region);
let kapsule = test_utilities::scaleway::scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes);
let _ = tx.deploy_environment_with_options(
@@ -40,11 +33,7 @@ pub fn deploy_environment(
tx.commit()
}
pub fn delete_environment(
context: &Context,
region: Region,
environment_action: EnvironmentAction,
) -> TransactionResult {
pub fn delete_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = test_utilities::scaleway::docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
@@ -52,7 +41,6 @@ pub fn delete_environment(
let cp = test_utilities::scaleway::cloud_provider_scaleway(context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let dns_provider = test_utilities::cloudflare::dns_provider_cloudflare(context);
let object_storage = test_utilities::scaleway::scw_object_storage(context.clone(), region);
let kapsule = test_utilities::scaleway::scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes);
let _ = tx.delete_environment(&kapsule, &environment_action);
@@ -60,16 +48,27 @@ pub fn delete_environment(
tx.commit()
}
pub fn pause_environment(context: &Context, environment_action: EnvironmentAction) -> TransactionResult {
let engine = test_utilities::scaleway::docker_scw_cr_engine(context);
let session = engine.session().unwrap();
let mut tx = session.transaction();
let cp = test_utilities::scaleway::cloud_provider_scaleway(context);
let nodes = test_utilities::scaleway::scw_kubernetes_nodes();
let dns_provider = test_utilities::cloudflare::dns_provider_cloudflare(context);
let kapsule = test_utilities::scaleway::scw_kubernetes_kapsule(context, &cp, &dns_provider, nodes);
let _ = tx.pause_environment(&kapsule, &environment_action);
tx.commit()
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
#[ignore]
fn deploy_a_working_environment_with_no_router_on_scaleway_kapsule() {
fn scaleway_kapsule_deploy_a_working_environment_with_no_router() {
engine_run_test(|| {
let span = span!(
Level::INFO,
"test",
name = "deploy_a_working_environment_with_no_router_on_scaleway_kapsule"
);
let test_name = "scaleway_kapsule_deploy_a_working_environment_with_no_router";
let span = span!(Level::INFO, "test", name = test_name,);
let _enter = span.enter();
let context = context();
@@ -84,34 +83,29 @@ fn deploy_a_working_environment_with_no_router_on_scaleway_kapsule() {
let env_action = EnvironmentAction::Environment(environment);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let region = Region::from_str(secrets.SCALEWAY_DEFAULT_REGION.unwrap().as_str()).unwrap();
match deploy_environment(&context, region, env_action) {
match deploy_environment(&context, env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, region, env_action_for_delete) {
match delete_environment(&context_for_delete, env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
return "deploy_a_working_environment_with_no_router_on_scaleway_kapsule".to_string();
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
#[ignore]
fn deploy_a_not_working_environment_with_no_router_on_scaleway_kapsule() {
fn scaleway_kapsule_deploy_a_not_working_environment_with_no_router() {
engine_run_test(|| {
let span = span!(
Level::INFO,
"test",
name = "deploy_a_not_working_environment_with_no_router_on_scaleway_kapsule"
);
let test_name = "scaleway_kapsule_deploy_a_not_working_environment_with_no_router";
let span = span!(Level::INFO, "test", name = test_name,);
let _enter = span.enter();
let context = context();
@@ -126,34 +120,87 @@ fn deploy_a_not_working_environment_with_no_router_on_scaleway_kapsule() {
let env_action = EnvironmentAction::Environment(environment);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let region = Region::from_str(secrets.SCALEWAY_DEFAULT_REGION.unwrap().as_str()).unwrap();
match deploy_environment(&context, region, env_action) {
TransactionResult::Ok => assert!(true),
match deploy_environment(&context, env_action) {
TransactionResult::Ok => assert!(false),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
match delete_environment(&context_for_delete, region, env_action_for_delete) {
match delete_environment(&context_for_delete, env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
return "deploy_a_not_working_environment_with_no_router_on_scaleway_kapsule".to_string();
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
#[ignore]
fn build_with_buildpacks_and_deploy_a_working_environment_on_scaleway_kapsule() {
fn scaleway_kapsule_deploy_a_working_environment_and_pause() {
engine_run_test(|| {
let span = span!(
Level::INFO,
"test",
name = "build_with_buildpacks_and_deploy_a_working_environment_on_scaleway_kapsule"
let test_name = "scaleway_kapsule_deploy_a_working_environment_and_pause";
let span = span!(Level::INFO, "test", name = test_name);
let _enter = span.enter();
let context = context();
let context_for_delete = context.clone_not_same_execution_id();
let secrets = FuncTestsSecrets::new();
let environment = test_utilities::scaleway::working_minimal_environment(&context, secrets.clone());
let env_action = EnvironmentAction::Environment(environment.clone());
match deploy_environment(&context, env_action.clone()) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match pause_environment(&context_for_delete, env_action.clone()) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Check that we have actually 0 pods running for this app
let app_name = format!("{}-0", environment.applications[0].name);
let ret = get_pods(
Kind::Scw,
environment.clone(),
app_name.clone().as_str(),
SCW_TEST_CLUSTER_ID,
secrets.clone(),
);
assert_eq!(ret.is_ok(), true);
assert_eq!(ret.unwrap().items.is_empty(), true);
// Check we can resume the env
let ctx_resume = context.clone_not_same_execution_id();
match deploy_environment(&ctx_resume, env_action.clone()) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// Cleanup
match delete_environment(&context_for_delete, env_action.clone()) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment() {
engine_run_test(|| {
let test_name = "scaleway_kapsule_build_with_buildpacks_and_deploy_a_working_environment";
let span = span!(Level::INFO, "test", name = test_name,);
let _enter = span.enter();
let context = context();
@@ -177,59 +224,444 @@ fn build_with_buildpacks_and_deploy_a_working_environment_on_scaleway_kapsule()
let env_action = EnvironmentAction::Environment(environment);
let env_action_for_delete = EnvironmentAction::Environment(environment_for_delete);
let region = Region::from_str(secrets.SCALEWAY_DEFAULT_REGION.unwrap().as_str()).unwrap();
match deploy_environment(&context, region, env_action) {
match deploy_environment(&context, env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, region, env_action_for_delete) {
match delete_environment(&context_for_delete, env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
return "build_with_buildpacks_and_deploy_a_working_environment_on_scaleway_kapsule".to_string();
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
#[ignore]
fn deploy_a_working_environment_with_domain_on_scaleway_kapsule() {
fn scaleway_kapsule_deploy_a_working_environment_with_domain() {
engine_run_test(|| {
let span = span!(
Level::INFO,
"test",
name = "deploy_a_working_environment_with_domain_on_scaleway_kapsule"
);
let test_name = "scaleway_kapsule_deploy_a_working_environment_with_domain";
let span = span!(Level::INFO, "test",);
let _enter = span.enter();
let context = context();
let context_for_delete = context.clone_not_same_execution_id();
let secrets = FuncTestsSecrets::new();
let environment = test_utilities::aws::working_minimal_environment(&context, secrets.clone());
let environment = test_utilities::scaleway::working_minimal_environment(&context, secrets.clone());
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment);
let env_action_for_delete = EnvironmentAction::Environment(environment_delete);
let region = Region::from_str(secrets.SCALEWAY_DEFAULT_REGION.unwrap().as_str()).unwrap();
match deploy_environment(&context, region, env_action) {
match deploy_environment(&context, env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match delete_environment(&context_for_delete, region, env_action_for_delete) {
match delete_environment(&context_for_delete, env_action_for_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
return "deploy_a_working_environment_with_domain_on_scaleway_kapsule".to_string();
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_deploy_a_working_environment_with_storage() {
engine_run_test(|| {
let test_name = "scaleway_kapsule_deploy_a_working_environment_with_storage";
let span = span!(Level::INFO, "test", name = test_name,);
let _enter = span.enter();
let context = context();
let context_for_deletion = context.clone_not_same_execution_id();
let secrets = FuncTestsSecrets::new();
let mut environment = test_utilities::scaleway::working_minimal_environment(&context, secrets);
// Todo: make an image that check there is a mounted disk
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.storage = vec![Storage {
id: generate_id(),
name: "photos".to_string(),
storage_type: StorageType::Ssd,
size_in_gib: 10,
mount_point: "/mnt/photos".to_string(),
snapshot_retention_in_days: 0,
}];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let env_action = EnvironmentAction::Environment(environment);
let env_action_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, env_action) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TODO(benjaminch): check the disk is here and with correct size, can use Scaleway API
match delete_environment(&context_for_deletion, env_action_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
test_name.to_string()
})
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_redeploy_same_app() {
    // Deploys an environment with a mounted volume, redeploys the very same
    // environment, and checks the app pod was NOT restarted (nothing changed),
    // then tears everything down.
    engine_run_test(|| {
        let test_name = "scaleway_kapsule_redeploy_same_app";
        let span = span!(Level::INFO, "test", name = test_name);
        let _enter = span.enter();

        let context = context();
        let context_bis = context.clone_not_same_execution_id();
        let context_for_deletion = context.clone_not_same_execution_id();
        let secrets = FuncTestsSecrets::new();

        let mut environment = test_utilities::scaleway::working_minimal_environment(&context, secrets.clone());
        // TODO: use an image that checks the disk is actually mounted.
        environment.applications = environment
            .applications
            .into_iter()
            .map(|mut app| {
                app.storage = vec![Storage {
                    id: generate_id(),
                    name: "photos".to_string(),
                    storage_type: StorageType::Ssd,
                    size_in_gib: 10,
                    mount_point: "/mnt/photos".to_string(),
                    snapshot_retention_in_days: 0,
                }];
                app
            })
            .collect::<Vec<qovery_engine::models::Application>>();

        let environment_redeploy = environment.clone();
        let environment_check1 = environment.clone();
        let environment_check2 = environment.clone();
        let mut environment_delete = environment.clone();
        environment_delete.action = Action::Delete;

        let env_action = EnvironmentAction::Environment(environment);
        let env_action_redeploy = EnvironmentAction::Environment(environment_redeploy);
        let env_action_delete = EnvironmentAction::Environment(environment_delete);

        // Initial deployment must succeed.
        assert!(matches!(
            deploy_environment(&context, env_action),
            TransactionResult::Ok
        ));

        let app_name = format!("{}-0", &environment_check1.applications[0].name);
        // `as_str()` borrows directly; no need to clone the String first.
        let (_, number) = is_pod_restarted_env(
            Kind::Scw,
            SCW_TEST_CLUSTER_ID,
            environment_check1,
            app_name.as_str(),
            secrets.clone(),
        );

        // Redeploying the exact same environment must also succeed.
        assert!(matches!(
            deploy_environment(&context_bis, env_action_redeploy),
            TransactionResult::Ok
        ));

        let (_, number2) = is_pod_restarted_env(
            Kind::Scw,
            SCW_TEST_CLUSTER_ID,
            environment_check2,
            app_name.as_str(),
            secrets,
        );

        // Nothing changed in the app, so it shouldn't have been restarted.
        assert!(number.eq(&number2));

        // Cleanup must succeed.
        assert!(matches!(
            delete_environment(&context_for_deletion, env_action_delete),
            TransactionResult::Ok
        ));

        test_name.to_string()
    })
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environment() {
    // Deploys a broken environment (container exits at startup), expects an
    // unrecoverable failure, then deploys the working variant and cleans up.
    engine_run_test(|| {
        // Kept in sync with the function name (was "..._and_after_working_environment").
        let test_name = "scaleway_kapsule_deploy_a_not_working_environment_and_then_working_environment";
        let span = span!(Level::INFO, "test", name = test_name,);
        let _enter = span.enter();

        let context = context();
        let context_for_not_working = context.clone_not_same_execution_id();
        let context_for_delete = context.clone_not_same_execution_id();
        let secrets = FuncTestsSecrets::new();

        // Environment generation.
        let environment = test_utilities::scaleway::working_minimal_environment(&context, secrets);
        let mut environment_for_not_working = environment.clone();
        // This environment is broken by a container exiting right after start.
        environment_for_not_working.applications = environment_for_not_working
            .applications
            .into_iter()
            .map(|mut app| {
                app.git_url = "https://github.com/Qovery/engine-testing.git".to_string();
                app.branch = "1app_fail_deploy".to_string();
                app.commit_id = "5b89305b9ae8a62a1f16c5c773cddf1d12f70db1".to_string();
                app.environment_variables = vec![];
                app
            })
            .collect::<Vec<qovery_engine::models::Application>>();

        let mut environment_for_delete = environment.clone();
        environment_for_delete.action = Action::Delete;

        // Environment actions.
        let env_action = EnvironmentAction::Environment(environment);
        let env_action_not_working = EnvironmentAction::Environment(environment_for_not_working);
        let env_action_delete = EnvironmentAction::Environment(environment_for_delete);

        // The broken environment must fail with an unrecoverable error.
        assert!(matches!(
            deploy_environment(&context_for_not_working, env_action_not_working),
            TransactionResult::UnrecoverableError(_, _)
        ));

        // The working environment must then deploy fine.
        assert!(matches!(
            deploy_environment(&context, env_action),
            TransactionResult::Ok
        ));

        // Cleanup must succeed.
        assert!(matches!(
            delete_environment(&context_for_delete, env_action_delete),
            TransactionResult::Ok
        ));

        test_name.to_string()
    })
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
#[ignore] // TODO(benjaminch): Make it work (it doesn't work on AWS either)
fn scaleway_kapsule_deploy_ok_fail_fail_ok_environment() {
    // Deploys OK, then fails twice with an exiting container (each failure may
    // roll back or end in an unrecoverable error), then deploys OK again and
    // cleans up.
    engine_run_test(|| {
        let test_name = "scaleway_kapsule_deploy_ok_fail_fail_ok_environment";
        let span = span!(Level::INFO, "test", name = test_name);
        let _enter = span.enter();

        // Working environment.
        let context = context();
        let secrets = FuncTestsSecrets::new();
        let environment = test_utilities::scaleway::working_minimal_environment(&context, secrets);

        // Not working #1: container that exits immediately.
        let context_for_not_working_1 = context.clone_not_same_execution_id();
        let mut not_working_env_1 = environment.clone();
        not_working_env_1.applications = not_working_env_1
            .applications
            .into_iter()
            .map(|mut app| {
                app.git_url = "https://gitlab.com/maathor/my-exit-container".to_string();
                app.branch = "master".to_string();
                app.commit_id = "55bc95a23fbf91a7699c28c5f61722d4f48201c9".to_string();
                app.environment_variables = vec![];
                app
            })
            .collect::<Vec<qovery_engine::models::Application>>();

        // Not working #2: same broken environment, fresh execution id.
        let context_for_not_working_2 = context.clone_not_same_execution_id();
        let not_working_env_2 = not_working_env_1.clone();

        // Deletion environment.
        let context_for_delete = context.clone_not_same_execution_id();
        let mut delete_env = environment.clone();
        delete_env.action = Action::Delete;

        let env_action = EnvironmentAction::Environment(environment);
        let env_action_not_working_1 = EnvironmentAction::Environment(not_working_env_1);
        let env_action_not_working_2 = EnvironmentAction::Environment(not_working_env_2);
        let env_action_delete = EnvironmentAction::Environment(delete_env);

        // OK.
        assert!(matches!(
            deploy_environment(&context, env_action.clone()),
            TransactionResult::Ok
        ));

        // FAIL: rollback or unrecoverable error are both acceptable.
        assert!(matches!(
            deploy_environment(&context_for_not_working_1, env_action_not_working_1),
            TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _)
        ));

        // FAIL again: same acceptable outcomes.
        assert!(matches!(
            deploy_environment(&context_for_not_working_2, env_action_not_working_2),
            TransactionResult::Rollback(_) | TransactionResult::UnrecoverableError(_, _)
        ));

        // Back to working.
        assert!(matches!(
            deploy_environment(&context, env_action.clone()),
            TransactionResult::Ok
        ));

        // Cleanup must succeed.
        assert!(matches!(
            delete_environment(&context_for_delete, env_action_delete),
            TransactionResult::Ok
        ));

        test_name.to_string()
    })
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_deploy_a_non_working_environment_with_no_failover() {
    // Deploys a non-working environment without any failover: the deployment
    // must end in an unrecoverable error, and the cleanup must still succeed.
    engine_run_test(|| {
        let test_name = "scaleway_kapsule_deploy_a_non_working_environment_with_no_failover";
        let span = span!(Level::INFO, "test", name = test_name,);
        let _enter = span.enter();

        let context = context();
        let secrets = FuncTestsSecrets::new();
        let environment = test_utilities::scaleway::non_working_environment(&context, secrets);

        let context_for_delete = context.clone_not_same_execution_id();
        let mut delete_env = environment.clone();
        delete_env.action = Action::Delete;

        let env_action = EnvironmentAction::Environment(environment);
        let env_action_delete = EnvironmentAction::Environment(delete_env);

        // Deployment must fail with an unrecoverable error (no rollback possible).
        assert!(matches!(
            deploy_environment(&context, env_action),
            TransactionResult::UnrecoverableError(_, _)
        ));

        // Cleanup must succeed.
        assert!(matches!(
            delete_environment(&context_for_delete, env_action_delete),
            TransactionResult::Ok
        ));

        test_name.to_string()
    })
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_deploy_a_non_working_environment_with_a_working_failover() {
    // Deploys a non-working environment that has a working failover: the
    // transaction is still reported as an unrecoverable error, then the
    // (working) environment is deleted.
    engine_run_test(|| {
        let test_name = "scaleway_kapsule_deploy_a_non_working_environment_with_a_working_failover";
        let span = span!(Level::INFO, "test", name = test_name,);
        let _enter = span.enter();

        // Context for the non-working environment.
        let context = context();
        let secrets = FuncTestsSecrets::new();
        let environment = test_utilities::scaleway::non_working_environment(&context, secrets.clone());
        let failover_environment = test_utilities::scaleway::working_minimal_environment(&context, secrets.clone());

        // Context for deletion.
        let context_deletion = context.clone_not_same_execution_id();
        let mut delete_env = test_utilities::scaleway::working_minimal_environment(&context_deletion, secrets);
        delete_env.action = Action::Delete;

        let env_action_delete = EnvironmentAction::Environment(delete_env);
        let env_action = EnvironmentAction::EnvironmentWithFailover(environment, failover_environment);

        // Deployment must end in an unrecoverable error despite the failover.
        assert!(matches!(
            deploy_environment(&context, env_action),
            TransactionResult::UnrecoverableError(_, _)
        ));

        // Cleanup must succeed.
        assert!(matches!(
            delete_environment(&context_deletion, env_action_delete),
            TransactionResult::Ok
        ));

        test_name.to_string()
    })
}
#[cfg(feature = "test-scw-self-hosted")]
#[test]
fn scaleway_kapsule_deploy_a_non_working_environment_with_a_non_working_failover() {
    // Deploys a non-working environment whose failover is also broken: the
    // deployment must end in an unrecoverable error, and the cleanup may
    // either succeed or fail unrecoverably (both outcomes are accepted).
    engine_run_test(|| {
        let test_name = "scaleway_kapsule_deploy_a_non_working_environment_with_a_non_working_failover";
        let span = span!(Level::INFO, "test", name = test_name,);
        let _enter = span.enter();

        let context = context();
        let secrets = FuncTestsSecrets::new();
        let environment = test_utilities::scaleway::non_working_environment(&context, secrets.clone());
        let failover_environment = test_utilities::scaleway::non_working_environment(&context, secrets.clone());

        let context_for_deletion = context.clone_not_same_execution_id();
        let mut delete_env = test_utilities::scaleway::non_working_environment(&context_for_deletion, secrets);
        delete_env.action = Action::Delete;

        // Environment actions.
        let env_action_delete = EnvironmentAction::Environment(delete_env);
        let env_action = EnvironmentAction::EnvironmentWithFailover(environment, failover_environment);

        // Deployment must fail with an unrecoverable error.
        assert!(matches!(
            deploy_environment(&context, env_action),
            TransactionResult::UnrecoverableError(_, _)
        ));

        // Deletion of a broken environment may succeed or fail unrecoverably.
        assert!(matches!(
            delete_environment(&context_for_deletion, env_action_delete),
            TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _)
        ));

        test_name.to_string()
    })
}

View File

@@ -81,6 +81,7 @@ fn create_upgrade_and_destroy_kapsule_cluster(
});
}
#[allow(dead_code)]
fn create_and_destroy_kapsule_cluster(
region: Region,
secrets: FuncTestsSecrets,

View File

@@ -8,6 +8,7 @@ use qovery_engine::object_storage::scaleway_object_storage::{BucketDeleteStrateg
use qovery_engine::object_storage::ObjectStorage;
use tempfile::NamedTempFile;
#[allow(dead_code)]
const TEST_REGION: Region = Region::Paris;
#[cfg(feature = "test-scw-infra")]

View File

@@ -1,10 +1,9 @@
extern crate test_utilities;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::{context, engine_run_test, generate_cluster_id, init, FuncTestsSecrets};
use self::test_utilities::utilities::{context, engine_run_test, init, FuncTestsSecrets};
use tracing::{span, Level};
use qovery_engine::cloud_provider::scaleway::application::Region;
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
use qovery_engine::transaction::TransactionResult;