mirror of
https://github.com/jlengrand/engine.git
synced 2026-03-10 08:11:21 +00:00
tests: fix sticky session test (#716)
This CL fixes the sticky session tests by using the ingress IP instead of the router DNS name, avoiding dependence on DNS propagation delays.
This commit is contained in:
@@ -11,9 +11,10 @@ use crate::cloud_provider::digitalocean::models::svc::DoLoadBalancer;
|
||||
use crate::cloud_provider::metrics::KubernetesApiMetrics;
|
||||
use crate::cmd::command::QoveryCommand;
|
||||
use crate::cmd::structs::{
|
||||
Configmap, Daemonset, Item, KubernetesEvent, KubernetesJob, KubernetesKind, KubernetesList, KubernetesNode,
|
||||
KubernetesPod, KubernetesPodStatusPhase, KubernetesPodStatusReason, KubernetesService, KubernetesVersion,
|
||||
LabelsContent, Namespace, Secrets, HPA, PDB, PVC, SVC,
|
||||
Configmap, Daemonset, Item, KubernetesEvent, KubernetesIngress, KubernetesIngressStatusLoadBalancerIngress,
|
||||
KubernetesJob, KubernetesKind, KubernetesList, KubernetesNode, KubernetesPod, KubernetesPodStatusPhase,
|
||||
KubernetesPodStatusReason, KubernetesService, KubernetesVersion, LabelsContent, Namespace, Secrets, HPA, PDB, PVC,
|
||||
SVC,
|
||||
};
|
||||
use crate::constants::KUBECONFIG;
|
||||
use crate::error::{SimpleError, SimpleErrorKind};
|
||||
@@ -193,6 +194,28 @@ where
|
||||
Ok(Some(result.status.load_balancer.ingress.first().unwrap().hostname.clone()))
|
||||
}
|
||||
|
||||
pub fn kubectl_exec_get_external_ingress<P>(
|
||||
kubernetes_config: P,
|
||||
namespace: &str,
|
||||
name: &str,
|
||||
envs: Vec<(&str, &str)>,
|
||||
) -> Result<Option<KubernetesIngressStatusLoadBalancerIngress>, CommandError>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let result = kubectl_exec::<P, KubernetesIngress>(
|
||||
vec!["get", "-n", namespace, "ing", name, "-o", "json"],
|
||||
kubernetes_config,
|
||||
envs,
|
||||
)?;
|
||||
|
||||
if result.status.load_balancer.ingress.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(Some(result.status.load_balancer.ingress.first().unwrap().clone()))
|
||||
}
|
||||
|
||||
pub fn kubectl_exec_is_pod_ready_with_retry<P>(
|
||||
kubernetes_config: P,
|
||||
namespace: &str,
|
||||
|
||||
@@ -15,6 +15,12 @@ pub struct KubernetesService {
|
||||
pub status: KubernetesServiceStatus,
|
||||
}
|
||||
|
||||
/// Kubernetes Ingress resource as returned by `kubectl get ing -o json`,
/// reduced to the single field this crate reads.
#[derive(Deserialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesIngress {
    // Only `status` is deserialized; metadata/spec are intentionally ignored.
    pub status: KubernetesIngressStatus,
}
|
||||
|
||||
pub struct LabelsContent {
|
||||
pub name: String,
|
||||
pub value: String,
|
||||
@@ -147,6 +153,25 @@ pub struct KubernetesServiceStatusLoadBalancerIngress {
|
||||
pub hostname: String,
|
||||
}
|
||||
|
||||
/// `status` object of a Kubernetes Ingress resource.
#[derive(Deserialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesIngressStatus {
    // Maps from JSON `loadBalancer` via the camelCase rename on the struct.
    pub load_balancer: KubernetesIngressStatusLoadBalancer,
}
|
||||
|
||||
/// `status.loadBalancer` object of a Kubernetes Ingress resource.
#[derive(Deserialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesIngressStatusLoadBalancer {
    // Empty until the cloud provider assigns the ingress an external address.
    pub ingress: Vec<KubernetesIngressStatusLoadBalancerIngress>,
}
|
||||
|
||||
/// One entry of `status.loadBalancer.ingress`: the external address of the ingress.
#[derive(Deserialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesIngressStatusLoadBalancerIngress {
    // Providers expose an IP and/or a hostname; presumably IP-based LBs set `ip`
    // and DNS-based LBs (e.g. ELB) set `hostname` — TODO confirm per provider.
    pub ip: Option<String>,
    pub hostname: Option<String>,
}
|
||||
|
||||
#[derive(Deserialize, Clone, Eq, PartialEq, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KubernetesPod {
|
||||
|
||||
@@ -50,6 +50,7 @@ use std::rc::Rc;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tracing::{span, Level};
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
pub enum RegionActivationStatus {
|
||||
@@ -1038,36 +1039,30 @@ pub fn environment_only_http_server_router(context: &Context, test_domain: &str)
|
||||
}
|
||||
}
|
||||
|
||||
/// Test if stick session are activated on given routers via cookie.
|
||||
pub fn routers_sessions_are_sticky(routers: Vec<Router>) -> bool {
|
||||
/// Test if stick sessions are activated on given routers via cookie.
|
||||
pub fn session_is_sticky(url: Url, host: String, max_age: u32) -> bool {
|
||||
let mut is_ok = true;
|
||||
let http_client = reqwest::blocking::Client::builder()
|
||||
.danger_accept_invalid_certs(true) // this test ignores certificate validity (not its purpose)
|
||||
.build()
|
||||
.expect("Cannot build reqwest client");
|
||||
|
||||
for router in routers.iter() {
|
||||
for route in router.routes.iter() {
|
||||
let http_request_result = http_client
|
||||
.get(format!("https://{}{}", router.default_domain, route.path))
|
||||
.send();
|
||||
let http_request_result = http_client.get(url.to_string()).header("Host", host.as_str()).send();
|
||||
|
||||
if http_request_result.is_err() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let http_response = http_request_result.expect("cannot retrieve HTTP request result");
|
||||
|
||||
is_ok &= match http_response.headers().get("Set-Cookie") {
|
||||
None => false,
|
||||
Some(value) => match value.to_str() {
|
||||
Err(_) => false,
|
||||
Ok(s) => s.contains("INGRESSCOOKIE_QOVERY=") && s.contains("Max-Age=85400"),
|
||||
},
|
||||
};
|
||||
}
|
||||
if http_request_result.is_err() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let http_response = http_request_result.expect("cannot retrieve HTTP request result");
|
||||
|
||||
is_ok &= match http_response.headers().get("Set-Cookie") {
|
||||
None => false,
|
||||
Some(value) => match value.to_str() {
|
||||
Err(_) => false,
|
||||
Ok(s) => s.contains("INGRESSCOOKIE_QOVERY=") && s.contains(format!("Max-Age={}", max_age).as_str()),
|
||||
},
|
||||
};
|
||||
|
||||
is_ok
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
extern crate test_utilities;
|
||||
|
||||
use self::test_utilities::common::{routers_sessions_are_sticky, Infrastructure};
|
||||
use self::test_utilities::common::{session_is_sticky, Infrastructure};
|
||||
use self::test_utilities::utilities::{
|
||||
engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, logger, FuncTestsSecrets,
|
||||
};
|
||||
@@ -10,12 +10,12 @@ use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs;
|
||||
use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType};
|
||||
use qovery_engine::transaction::TransactionResult;
|
||||
use qovery_engine::utilities::to_short_id;
|
||||
use retry::delay::Fibonacci;
|
||||
use std::collections::BTreeMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use test_utilities::aws::aws_default_engine_config;
|
||||
use test_utilities::utilities::{context, init, kubernetes_config_path};
|
||||
use tracing::{span, Level};
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[cfg(feature = "test-aws-minimal")]
|
||||
@@ -998,10 +998,56 @@ fn aws_eks_deploy_a_working_environment_with_sticky_session() {
|
||||
let ret = environment.deploy_environment(&env_action, logger.clone(), &engine_config);
|
||||
assert!(matches!(ret, TransactionResult::Ok));
|
||||
|
||||
// let time for nginx to reload the config
|
||||
thread::sleep(Duration::from_secs(10));
|
||||
// checking if cookie is properly set on the app
|
||||
assert!(routers_sessions_are_sticky(environment.routers));
|
||||
// checking cookie is properly set on the app
|
||||
let kubeconfig = kubernetes_config_path(engine_config.context().clone(), Kind::Aws, "/tmp", secrets)
|
||||
.expect("cannot get kubeconfig");
|
||||
let router = environment
|
||||
.routers
|
||||
.first()
|
||||
.unwrap()
|
||||
.to_router_domain(engine_config.context(), engine_config.cloud_provider(), logger.clone())
|
||||
.unwrap();
|
||||
let environment_domain = environment
|
||||
.to_environment_domain(
|
||||
engine_config.context(),
|
||||
engine_config.cloud_provider(),
|
||||
engine_config.container_registry().registry_info(),
|
||||
logger.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// let some time for ingress to get its IP or hostname
|
||||
// Sticky session is checked on the ingress IP or hostname so we are not subject to long DNS propagation, making the test less flaky.
|
||||
let ingress = retry::retry(Fibonacci::from_millis(15000).take(8), || {
|
||||
match qovery_engine::cmd::kubectl::kubectl_exec_get_external_ingress(
|
||||
&kubeconfig,
|
||||
environment_domain.namespace(),
|
||||
router.sanitized_name().as_str(),
|
||||
engine_config.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
Ok(res) => match res {
|
||||
Some(res) => retry::OperationResult::Ok(res),
|
||||
None => retry::OperationResult::Retry("ingress not found"),
|
||||
},
|
||||
Err(_) => retry::OperationResult::Retry("cannot get ingress"),
|
||||
}
|
||||
})
|
||||
.expect("cannot get ingress");
|
||||
let ingress_host = ingress
|
||||
.ip
|
||||
.as_ref()
|
||||
.unwrap_or_else(|| ingress.hostname.as_ref().expect("ingress has no IP nor hostname"));
|
||||
|
||||
for router in environment.routers.iter() {
|
||||
for route in router.routes.iter() {
|
||||
assert!(session_is_sticky(
|
||||
Url::parse(format!("http://{}{}", ingress_host.to_string(), route.path).as_str())
|
||||
.expect("cannot parse URL"),
|
||||
router.default_domain.clone(),
|
||||
85400,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let ret = environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete);
|
||||
assert!(matches!(ret, TransactionResult::Ok));
|
||||
|
||||
@@ -1,22 +1,23 @@
|
||||
extern crate test_utilities;
|
||||
|
||||
use self::test_utilities::common::routers_sessions_are_sticky;
|
||||
use self::test_utilities::common::session_is_sticky;
|
||||
use self::test_utilities::digitalocean::{clean_environments, DO_TEST_REGION};
|
||||
use self::test_utilities::utilities::{
|
||||
engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets,
|
||||
engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, kubernetes_config_path, logger,
|
||||
FuncTestsSecrets,
|
||||
};
|
||||
use ::function_name::named;
|
||||
use qovery_engine::cloud_provider::Kind;
|
||||
use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType};
|
||||
use qovery_engine::transaction::TransactionResult;
|
||||
use qovery_engine::utilities::to_short_id;
|
||||
use retry::delay::Fibonacci;
|
||||
use std::collections::BTreeMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use test_utilities::common::Infrastructure;
|
||||
use test_utilities::digitalocean::do_default_engine_config;
|
||||
use test_utilities::utilities::context;
|
||||
use tracing::{span, warn, Level};
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
// Note: All those tests relies on a test cluster running on DigitalOcean infrastructure.
|
||||
@@ -892,10 +893,56 @@ fn digitalocean_doks_deploy_a_working_environment_with_sticky_session() {
|
||||
let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config);
|
||||
assert!(matches!(result, TransactionResult::Ok));
|
||||
|
||||
// let time for nginx to reload the config
|
||||
thread::sleep(Duration::from_secs(10));
|
||||
// checking cookie is properly set on the app
|
||||
assert!(routers_sessions_are_sticky(environment.routers.clone()));
|
||||
let kubeconfig = kubernetes_config_path(engine_config.context().clone(), Kind::Do, "/tmp", secrets.clone())
|
||||
.expect("cannot get kubeconfig");
|
||||
let router = environment
|
||||
.routers
|
||||
.first()
|
||||
.unwrap()
|
||||
.to_router_domain(engine_config.context(), engine_config.cloud_provider(), logger.clone())
|
||||
.unwrap();
|
||||
let environment_domain = environment
|
||||
.to_environment_domain(
|
||||
engine_config.context(),
|
||||
engine_config.cloud_provider(),
|
||||
engine_config.container_registry().registry_info(),
|
||||
logger.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// let some time for ingress to get its IP or hostname
|
||||
// Sticky session is checked on the ingress IP or hostname so we are not subject to long DNS propagation, making the test less flaky.
|
||||
let ingress = retry::retry(Fibonacci::from_millis(15000).take(8), || {
|
||||
match qovery_engine::cmd::kubectl::kubectl_exec_get_external_ingress(
|
||||
&kubeconfig,
|
||||
environment_domain.namespace(),
|
||||
router.sanitized_name().as_str(),
|
||||
engine_config.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
Ok(res) => match res {
|
||||
Some(res) => retry::OperationResult::Ok(res),
|
||||
None => retry::OperationResult::Retry("ingress not found"),
|
||||
},
|
||||
Err(_) => retry::OperationResult::Retry("cannot get ingress"),
|
||||
}
|
||||
})
|
||||
.expect("cannot get ingress");
|
||||
let ingress_host = ingress
|
||||
.ip
|
||||
.as_ref()
|
||||
.unwrap_or_else(|| ingress.hostname.as_ref().expect("ingress has no IP nor hostname"));
|
||||
|
||||
for router in environment.routers.iter() {
|
||||
for route in router.routes.iter() {
|
||||
assert!(session_is_sticky(
|
||||
Url::parse(format!("http://{}{}", ingress_host.to_string(), route.path).as_str())
|
||||
.expect("cannot parse URL"),
|
||||
router.default_domain.clone(),
|
||||
85400,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let result =
|
||||
environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete);
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
extern crate test_utilities;
|
||||
|
||||
use self::test_utilities::common::routers_sessions_are_sticky;
|
||||
use self::test_utilities::common::session_is_sticky;
|
||||
use self::test_utilities::scaleway::{clean_environments, SCW_TEST_ZONE};
|
||||
use self::test_utilities::utilities::{
|
||||
context, engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets,
|
||||
context, engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, kubernetes_config_path,
|
||||
logger, FuncTestsSecrets,
|
||||
};
|
||||
use ::function_name::named;
|
||||
use qovery_engine::cloud_provider::Kind;
|
||||
use qovery_engine::io_models::{Action, CloneForTest, Port, Protocol, Storage, StorageType};
|
||||
use qovery_engine::transaction::TransactionResult;
|
||||
use qovery_engine::utilities::to_short_id;
|
||||
use retry::delay::Fibonacci;
|
||||
use std::collections::BTreeMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use test_utilities::common::Infrastructure;
|
||||
use test_utilities::scaleway::scw_default_engine_config;
|
||||
use tracing::{span, warn, Level};
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
// Note: All those tests relies on a test cluster running on Scaleway infrastructure.
|
||||
@@ -1002,10 +1003,56 @@ fn scaleway_kapsule_deploy_a_working_environment_with_sticky_session() {
|
||||
let result = environment.deploy_environment(&env_action, logger.clone(), &engine_config);
|
||||
assert!(matches!(result, TransactionResult::Ok));
|
||||
|
||||
// let time for nginx to reload the config
|
||||
thread::sleep(Duration::from_secs(10));
|
||||
// checking cookie is properly set on the app
|
||||
assert!(routers_sessions_are_sticky(environment.routers.clone()));
|
||||
let kubeconfig = kubernetes_config_path(engine_config.context().clone(), Kind::Scw, "/tmp", secrets.clone())
|
||||
.expect("cannot get kubeconfig");
|
||||
let router = environment
|
||||
.routers
|
||||
.first()
|
||||
.unwrap()
|
||||
.to_router_domain(engine_config.context(), engine_config.cloud_provider(), logger.clone())
|
||||
.unwrap();
|
||||
let environment_domain = environment
|
||||
.to_environment_domain(
|
||||
engine_config.context(),
|
||||
engine_config.cloud_provider(),
|
||||
engine_config.container_registry().registry_info(),
|
||||
logger.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// let some time for ingress to get its IP or hostname
|
||||
// Sticky session is checked on the ingress IP or hostname so we are not subject to long DNS propagation, making the test less flaky.
|
||||
let ingress = retry::retry(Fibonacci::from_millis(15000).take(8), || {
|
||||
match qovery_engine::cmd::kubectl::kubectl_exec_get_external_ingress(
|
||||
&kubeconfig,
|
||||
environment_domain.namespace(),
|
||||
router.sanitized_name().as_str(),
|
||||
engine_config.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
Ok(res) => match res {
|
||||
Some(res) => retry::OperationResult::Ok(res),
|
||||
None => retry::OperationResult::Retry("ingress not found"),
|
||||
},
|
||||
Err(_) => retry::OperationResult::Retry("cannot get ingress"),
|
||||
}
|
||||
})
|
||||
.expect("cannot get ingress");
|
||||
let ingress_host = ingress
|
||||
.ip
|
||||
.as_ref()
|
||||
.unwrap_or_else(|| ingress.hostname.as_ref().expect("ingress has no IP nor hostname"));
|
||||
|
||||
for router in environment.routers.iter() {
|
||||
for route in router.routes.iter() {
|
||||
assert!(session_is_sticky(
|
||||
Url::parse(format!("http://{}{}", ingress_host.to_string(), route.path).as_str())
|
||||
.expect("cannot parse URL"),
|
||||
router.default_domain.clone(),
|
||||
85400,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let result =
|
||||
environment_for_delete.delete_environment(&env_action_for_delete, logger, &engine_config_for_delete);
|
||||
|
||||
Reference in New Issue
Block a user