feat: add AWS Helm level 1 infra

This commit is contained in:
Pierre Mavro
2021-06-04 09:25:35 +02:00
committed by Pierre Mavro
parent e7ea7bb901
commit 5bfcc8f19a
10 changed files with 322 additions and 212 deletions

View File

@@ -31,4 +31,5 @@ resource "local_file" "kubeconfig" {
filename = "{{ s3_kubeconfig_bucket }}/${var.kubernetes_cluster_id}.yaml"
content = local.kubeconfig
file_permission = "0644"
depends_on = [aws_eks_cluster.eks_cluster]
}

View File

@@ -1,9 +1,15 @@
// todo: manage this directly in the engine because cluster reinstall doesn't update kubeconfig
# Uploads the locally generated kubeconfig to the shared S3 bucket so that
# out-of-cluster components can retrieve cluster credentials.
resource "aws_s3_bucket_object" "upload_kubeconfig" {
  bucket                 = var.s3_bucket_kubeconfig
  key                    = "${var.kubernetes_cluster_id}.yaml"
  source                 = local_file.kubeconfig.filename
  server_side_encryption = "AES256"
  # HCL forbids duplicate arguments: the stale single-line depends_on that the
  # diff extraction left alongside this one has been dropped. The EKS cluster
  # dependency guarantees the kubeconfig is only uploaded once the cluster
  # actually exists.
  depends_on = [
    local_file.kubeconfig,
    aws_s3_bucket.kubeconfigs_bucket,
    aws_eks_cluster.eks_cluster
  ]
  tags = local.tags_eks
}

View File

@@ -1,119 +1,119 @@
# Helm release name for the AWS VPC CNI chart; referenced by the
# helm_release resource below.
locals {
aws_cni_chart_release_name = "aws-vpc-cni"
}
# Asks helper.sh whether a pre-Helm CNI version is currently installed.
# The result feeds the chart's "originalMatchLabels" value so the upgrade
# can match the selector of the existing daemonset.
data "external" "is_cni_old_installed_version" {
program = ["./helper.sh", "is_cni_old_installed_version"]
# Only meaningful once the cluster exists and the CNI handover has run.
depends_on = [
aws_eks_cluster.eks_cluster,
null_resource.enable_cni_managed_by_helm,
]
}
# On the first boot, it's required to remove the existing CNI to get them managed by helm
# Runs helper.sh once against the fresh cluster via a local-exec provisioner.
resource "null_resource" "enable_cni_managed_by_helm" {
provisioner "local-exec" {
command = <<EOT
./helper.sh enable_cni_managed_by_helm
EOT
# Credentials are rendered at template-generation time ({{ ... }} is the
# engine's templating, not Terraform interpolation).
# NOTE(review): presumably these keys are scoped to this cluster — confirm upstream.
environment = {
KUBECONFIG = local_file.kubeconfig.filename
AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
AWS_DEFAULT_REGION = "{{ aws_region }}"
}
}
# The script talks to the cluster, so the cluster must exist first.
depends_on = [
aws_eks_cluster.eks_cluster,
]
}
# Inline YAML values for the aws-vpc-cni chart: CRDs are managed outside of
# Helm, so the chart must not try to create them.
# NOTE(review): "create" is expected to be nested under "crd:" in YAML — the
# extraction may have dropped the indentation; verify in the real file.
locals {
aws_cni = <<CNI
crd:
create: false
CNI
}
# Deploys the AWS VPC CNI as a Helm release so that future CNI upgrades are
# managed declaratively instead of via the EKS-provisioned daemonset.
resource "helm_release" "aws_vpc_cni" {
name = local.aws_cni_chart_release_name
chart = "charts/aws-vpc-cni"
namespace = "kube-system"
# atomic: roll back automatically if the release fails.
atomic = true
max_history = 50
values = [
local.aws_cni,
]
set {
name = "image.region"
value = var.region
type = "string"
}
set {
name = "image.pullPolicy"
value = "IfNotPresent"
type = "string"
}
# Selector labels of the previously installed (non-Helm) CNI, detected by
# the external data source, so the upgrade matches the existing daemonset.
set {
name = "originalMatchLabels"
value = data.external.is_cni_old_installed_version.result.is_cni_old_installed_version
type = "string"
}
# label ENIs
set {
name = "env.CLUSTER_NAME"
value = var.kubernetes_cluster_name
type = "string"
}
## POD ALLOCATION ##
# number of total IP addresses that the daemon should attempt to allocate for pod assignment on the node (init phase)
set {
name = "env.MINIMUM_IP_TARGET"
value = "60"
type = "string"
}
# number of free IP addresses that the daemon should attempt to keep available for pod assignment on the node
set {
name = "env.WARM_IP_TARGET"
value = "10"
type = "string"
}
# maximum number of ENIs that will be attached to the node (k8s recommend to avoid going over 100)
set {
name = "env.MAX_ENI"
value = "100"
type = "string"
}
# Limits
set {
name = "resources.requests.cpu"
value = "50m"
type = "string"
}
# Bumping this value forces a release upgrade even without chart changes.
set {
name = "forced_upgrade"
value = var.forced_upgrade
type = "string"
}
# {% if %} is the engine's templating, rendered before Terraform runs:
# vault access is only wired in for non-test clusters.
depends_on = [
aws_eks_cluster.eks_cluster,
null_resource.enable_cni_managed_by_helm,
data.external.is_cni_old_installed_version,
{% if not test_cluster %}
vault_generic_secret.cluster-access,
{% endif %}
]
}
//locals {
// aws_cni_chart_release_name = "aws-vpc-cni"
//}
//
//data "external" "is_cni_old_installed_version" {
// program = ["./helper.sh", "is_cni_old_installed_version"]
// depends_on = [
// aws_eks_cluster.eks_cluster,
// null_resource.enable_cni_managed_by_helm,
// ]
//}
//
//# On the first boot, it's required to remove the existing CNI to get them managed by helm
//resource "null_resource" "enable_cni_managed_by_helm" {
// provisioner "local-exec" {
// command = <<EOT
//./helper.sh enable_cni_managed_by_helm
//EOT
//
// environment = {
// KUBECONFIG = local_file.kubeconfig.filename
// AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
// AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
// AWS_DEFAULT_REGION = "{{ aws_region }}"
// }
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// ]
//}
//
//locals {
// aws_cni = <<CNI
//crd:
// create: false
//CNI
//}
//
//resource "helm_release" "aws_vpc_cni" {
// name = local.aws_cni_chart_release_name
// chart = "charts/aws-vpc-cni"
// namespace = "kube-system"
// atomic = true
// max_history = 50
//
// values = [
// local.aws_cni,
// ]
//
// set {
// name = "image.region"
// value = var.region
// type = "string"
// }
//
// set {
// name = "image.pullPolicy"
// value = "IfNotPresent"
// type = "string"
// }
//
// set {
// name = "originalMatchLabels"
// value = data.external.is_cni_old_installed_version.result.is_cni_old_installed_version
// type = "string"
// }
//
// # label ENIs
// set {
// name = "env.CLUSTER_NAME"
// value = var.kubernetes_cluster_name
// type = "string"
// }
//
// ## POD ALLOCATION ##
// # number of total IP addresses that the daemon should attempt to allocate for pod assignment on the node (init phase)
// set {
// name = "env.MINIMUM_IP_TARGET"
// value = "60"
// type = "string"
// }
//
// # number of free IP addresses that the daemon should attempt to keep available for pod assignment on the node
// set {
// name = "env.WARM_IP_TARGET"
// value = "10"
// type = "string"
// }
//
// # maximum number of ENIs that will be attached to the node (k8s recommend to avoid going over 100)
// set {
// name = "env.MAX_ENI"
// value = "100"
// type = "string"
// }
//
// # Limits
// set {
// name = "resources.requests.cpu"
// value = "50m"
// type = "string"
// }
//
// set {
// name = "forced_upgrade"
// value = var.forced_upgrade
// type = "string"
// }
//
// depends_on = [
// aws_eks_cluster.eks_cluster,
// null_resource.enable_cni_managed_by_helm,
// data.external.is_cni_old_installed_version,
// {% if not test_cluster %}
// vault_generic_secret.cluster-access,
// {% endif %}
// ]
//}

View File

@@ -10,7 +10,7 @@ locals {
"aws_access_key_id": "{{ aws_access_key }}",
"aws_secret_access_key": "{{ aws_secret_key }}",
"external_dns_provider": "{{ external_dns_provider }}",
"dns_email_report": {{ dns_email_report }},
"dns_email_report": "{{ dns_email_report }}",
"acme_server_url": "{{ acme_server_url }}",
"managed_dns_domains_terraform_format": "{{ managed_dns_domains_terraform_format }}",
"cloudflare_api_token": "{{ cloudflare_api_token }}",
@@ -25,8 +25,8 @@ locals {
"loki_storage_config_aws_s3": "s3://${urlencode(aws_iam_access_key.iam_eks_loki.id)}:${urlencode(aws_iam_access_key.iam_eks_loki.secret)}@${var.region}/${aws_s3_bucket.loki_bucket.bucket}",
"aws_iam_loki_storage_key": "${aws_iam_access_key.iam_eks_loki.id}",
"aws_iam_loki_storage_secret": "${aws_iam_access_key.iam_eks_loki.secret}",
"qovery_agent_version": "${data.external.get_agent_version_to_use.result.version},
"qovery_engine_version": "${data.external.get_agent_version_to_use.result.version},
"qovery_agent_version": "${data.external.get_agent_version_to_use.result.version}",
"qovery_engine_version": "${data.external.get_agent_version_to_use.result.version}",
"nats_host_url": "${var.qovery_nats_url}",
"nats_username": "${var.qovery_nats_user}",
"nats_password": "${var.qovery_nats_password}"

View File

@@ -1,13 +1,16 @@
use crate::cloud_provider::helm::{
get_chart_namespace, ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces,
get_chart_namespace, ChartInfo, ChartSetValue, CommonChart, CoreDNSConfigChart, HelmChart, HelmChartNamespaces,
};
use crate::cmd::kubectl::{kubectl_exec_get_daemonset, kubectl_exec_with_output};
use crate::error::{SimpleError, SimpleErrorKind};
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use std::thread::sleep;
use std::time::Duration;
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AwsQoveryTerraformConfig {
pub cloud_provider: String,
pub region: String,
@@ -46,16 +49,30 @@ pub struct AwsQoveryTerraformConfig {
pub qovery_engine_version: String,
}
pub fn aws_helm_charts(qovery_terraform_config_file: &str) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
let qovery_terraform_config = match serde_json::from_str::<AwsQoveryTerraformConfig>(qovery_terraform_config_file) {
Ok(x) => x,
pub fn aws_helm_charts(
qovery_terraform_config_file: &str,
chart_prefix_path: Option<&str>,
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, SimpleError> {
let chart_prefix = match chart_prefix_path {
None => "./",
Some(x) => x,
};
let content_file = File::open(&qovery_terraform_config_file)?;
let reader = BufReader::new(content_file);
let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
error!(
"error while parsing terraform config file {}: {:?}",
&qovery_terraform_config_file, &e
);
return Err(SimpleError {
kind: SimpleErrorKind::Other,
message: Some(format!("{:?}", e)),
})
});
}
};
let prometheus_namespace = HelmChartNamespaces::Prometheus;
let loki_namespace = HelmChartNamespaces::Logging;
let loki_service_name = "loki".to_string();
@@ -64,15 +81,15 @@ pub fn aws_helm_charts(qovery_terraform_config_file: &str) -> Result<Vec<Vec<Box
let q_storage_class = CommonChart {
chart_info: ChartInfo {
name: "q-storageclass".to_string(),
path: "charts/q-storageclass".to_string(),
path: format!("{}/charts/q-storageclass", &chart_prefix),
..Default::default()
},
};
let aws_vpc_cni_chart = AwsVpcCniChart {
chart_info: ChartInfo {
name: "aws-vpc-cni".to_string(),
path: "charts/aws-vpc-cni".to_string(),
name: "aws-node".to_string(),
path: format!("{}/charts/aws-vpc-cni", &chart_prefix),
values: vec![
ChartSetValue {
key: "image.region".to_string(),
@@ -271,10 +288,10 @@ pub fn aws_helm_charts(qovery_terraform_config_file: &str) -> Result<Vec<Vec<Box
},
};
let coredns_config = CommonChart {
let coredns_config = CoreDNSConfigChart {
chart_info: ChartInfo {
name: "coredns-config".to_string(),
path: "charts/coredns-config".to_string(),
name: "coredns".to_string(),
path: format!("{}/charts/coredns-config", &chart_prefix),
values: vec![
ChartSetValue {
key: "managed_dns".to_string(),
@@ -951,39 +968,41 @@ pub fn aws_helm_charts(qovery_terraform_config_file: &str) -> Result<Vec<Vec<Box
let mut level_2: Vec<Box<dyn HelmChart>> = vec![];
let mut level_3: Vec<Box<dyn HelmChart>> = vec![
Box::new(cluster_autoscaler),
Box::new(aws_iam_eks_user_mapper),
Box::new(aws_calico),
// Box::new(cluster_autoscaler),
// Box::new(aws_iam_eks_user_mapper),
// Box::new(aws_calico),
];
let mut level_4: Vec<Box<dyn HelmChart>> = vec![
Box::new(metric_server),
Box::new(aws_node_term_handler),
Box::new(external_dns),
// Box::new(metric_server),
// Box::new(aws_node_term_handler),
// Box::new(external_dns),
];
let level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress), Box::new(cert_manager), Box::new(pleco)];
let level_5: Vec<Box<dyn HelmChart>> = vec![
// Box::new(nginx_ingress), Box::new(cert_manager), Box::new(pleco)
];
let mut level_6: Vec<Box<dyn HelmChart>> = vec![
Box::new(cert_manager_config),
Box::new(qovery_agent),
Box::new(qovery_engine),
// Box::new(cert_manager_config),
// Box::new(qovery_agent),
// Box::new(qovery_engine),
];
if &qovery_terraform_config.feature_flag_metrics_history == "true" {
level_2.push(Box::new(prometheus_operator));
level_4.push(Box::new(prometheus_adapter));
}
if &qovery_terraform_config.feature_flag_log_history == "true" {
level_3.push(Box::new(promtail));
level_4.push(Box::new(loki));
}
if &qovery_terraform_config.feature_flag_metrics_history == "true"
|| &qovery_terraform_config.feature_flag_log_history == "true"
{
level_6.push(Box::new(grafana))
};
// if &qovery_terraform_config.feature_flag_metrics_history == "true" {
// level_2.push(Box::new(prometheus_operator));
// level_4.push(Box::new(prometheus_adapter));
// }
// if &qovery_terraform_config.feature_flag_log_history == "true" {
// level_3.push(Box::new(promtail));
// level_4.push(Box::new(loki));
// }
//
// if &qovery_terraform_config.feature_flag_metrics_history == "true"
// || &qovery_terraform_config.feature_flag_log_history == "true"
// {
// level_6.push(Box::new(grafana))
// };
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6])
}
@@ -1024,7 +1043,7 @@ impl HelmChart for AwsVpcCniChart {
environment_variables.clone(),
|_| {},
|_| {},
)?;
);
kubectl_exec_with_output(
vec![
"-n",
@@ -1038,7 +1057,7 @@ impl HelmChart for AwsVpcCniChart {
environment_variables.clone(),
|_| {},
|_| {},
)?;
);
kubectl_exec_with_output(
vec![
"-n",
@@ -1052,10 +1071,12 @@ impl HelmChart for AwsVpcCniChart {
environment_variables.clone(),
|_| {},
|_| {},
)?
);
}
info!("AWS CNI successfully deployed")
info!("AWS CNI successfully deployed");
// sleep in order to be sure the daemonset is updated
sleep(Duration::from_secs(20))
}
false => info!("AWS CNI is already supported by Helm, nothing to do"),
};
@@ -1068,13 +1089,27 @@ impl AwsVpcCniChart {
fn enable_cni_managed_by_helm(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> bool {
let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
kubectl_exec_get_daemonset(
match kubectl_exec_get_daemonset(
kubernetes_config,
&self.chart_info.name,
self.namespace().as_str(),
Some("k8s-app=aws-node,app.kubernetes.io/managed-by=Helm"),
environment_variables,
)
.is_ok()
) {
Ok(x) => {
if x.items.is_empty() {
true
} else {
false
}
}
Err(e) => {
error!(
"error while getting daemonset info for chart {}, won't deploy CNI cahrt. {:?}",
&self.chart_info.name, e
);
false
}
}
}
}

View File

@@ -790,7 +790,7 @@ impl<'a> Kubernetes for EKS<'a> {
let helm_charts_to_deploy = cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
aws_helm_charts(kubeconfig.as_path().to_str().unwrap()),
aws_helm_charts(format!("{}/qovery-tf-config.json", &temp_dir).as_str(), Some(&temp_dir)),
)?;
cast_simple_error_to_engine_error(

View File

@@ -1,7 +1,7 @@
use crate::cloud_provider::helm::HelmAction::Deploy;
use crate::cloud_provider::helm::HelmChartNamespaces::KubeSystem;
use crate::cmd::helm::{helm_exec_uninstall_with_chart_info, helm_exec_upgrade_with_chart_info};
use crate::cmd::kubectl::kubectl_exec_rollout_restart_deployment;
use crate::cmd::kubectl::{kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output};
use crate::error::{SimpleError, SimpleErrorKind};
use std::path::Path;
use std::{fs, thread};
@@ -102,7 +102,6 @@ pub trait HelmChart: Send {
}
fn pre_exec(&self, _kubernetes_config: &Path, _envs: &[(String, String)]) -> Result<(), SimpleError> {
//
Ok(())
}
@@ -112,8 +111,12 @@ pub trait HelmChart: Send {
match self.exec(&kubernetes_config, &envs) {
Ok(_) => {}
Err(e) => {
error!("Error while deploying chart: {:?}", e.message);
return self.on_deploy_failure(&kubernetes_config, &envs);
error!(
"Error while deploying chart: {:?}",
e.message.clone().expect("no message provided")
);
self.on_deploy_failure(&kubernetes_config, &envs);
return Err(e);
}
};
self.post_exec(&kubernetes_config, &envs)?;
@@ -218,6 +221,57 @@ impl HelmChart for CoreDNSConfigChart {
&self.chart_info
}
fn pre_exec(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<(), SimpleError> {
let kind = "configmap";
let mut environment_variables: Vec<(&str, &str)> = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();
environment_variables.push(("KUBECONFIG", kubernetes_config.to_str().unwrap()));
info!("setting annotations and labels on {}/{}", &kind, &self.chart_info.name);
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
format!("meta.helm.sh/release-name={}", self.chart_info.name).as_str(),
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"annotate",
"--overwrite",
&kind,
&self.chart_info.name,
"meta.helm.sh/release-namespace=kube-system",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
kubectl_exec_with_output(
vec![
"-n",
"kube-system",
"label",
"--overwrite",
&kind,
&self.chart_info.name,
"app.kubernetes.io/managed-by=Helm",
],
environment_variables.clone(),
|_| {},
|_| {},
)?;
Ok(())
}
// todo: it would be better to avoid rebooting coredns on every run
fn post_exec(&self, kubernetes_config: &Path, envs: &[(String, String)]) -> Result<(), SimpleError> {
let environment_variables = envs.iter().map(|x| (x.0.as_str(), x.1.as_str())).collect();

View File

@@ -115,31 +115,46 @@ where
let mut json_output_string = String::new();
let mut error_message = String::new();
let mut helm_error_during_deployment = SimpleError {
kind: SimpleErrorKind::Other,
message: None,
};
match helm_exec_with_output(
args,
envs.clone(),
|out| match out {
Ok(line) => json_output_string = line,
Err(err) => error!("{:?}", err),
Err(err) => error!("{}", &err),
},
|out| match out {
Ok(line) => {
// helm errors are not json formatted unfortunately
if line.contains("has been rolled back") {
error_message = format!("Deployment {} has been rolled back", chart.name);
error_message = format!("deployment {} has been rolled back", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
} else if line.contains("has been uninstalled") {
error_message = format!("Deployment {} has been uninstalled due to failure", chart.name);
error_message = format!("deployment {} has been uninstalled due to failure", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
warn!("{}. {}", &error_message, &line);
} else {
error_message = format!("Deployment {} has failed", chart.name);
warn!("{}. {}", &error_message, &line);
error_message = format!("deployment {} has failed", chart.name);
helm_error_during_deployment.message = Some(error_message.clone());
error!("{}. {}", &error_message, &line);
}
}
Err(err) => error!("{:?}", err),
Err(err) => {
error_message = format!("helm chart {} failed before deployment. {:?}", chart.name, err);
helm_error_during_deployment.message = Some(error_message.clone());
error!("{}", error_message);
}
},
) {
Ok(_) => Ok(()),
Ok(_) => {
if helm_error_during_deployment.message.is_some() {
return Err(helm_error_during_deployment);
}
}
Err(e) => {
return Err(SimpleError {
kind: SimpleErrorKind::Other,
@@ -147,6 +162,8 @@ where
})
}
}
Ok(())
}
pub fn helm_exec_upgrade<P>(

View File

@@ -9,12 +9,13 @@ use serde::de::DeserializeOwned;
use crate::cloud_provider::digitalocean::models::svc::DOKubernetesList;
use crate::cloud_provider::metrics::KubernetesApiMetrics;
use crate::cmd::structs::{
Item, KubernetesEvent, KubernetesJob, KubernetesKind, KubernetesList, KubernetesNode, KubernetesPod,
Daemonset, Item, KubernetesEvent, KubernetesJob, KubernetesKind, KubernetesList, KubernetesNode, KubernetesPod,
KubernetesPodStatusPhase, KubernetesService, KubernetesVersion, LabelsContent,
};
use crate::cmd::utilities::exec_with_envs_and_output;
use crate::constants::KUBECONFIG;
use crate::error::{SimpleError, SimpleErrorKind};
use itertools::Itertools;
pub enum ScalingKind {
Deployment,
@@ -33,14 +34,19 @@ where
{
match exec_with_envs_and_output(
"kubectl",
args,
envs,
args.clone(),
envs.clone(),
stdout_output,
stderr_output,
Duration::max_value(),
) {
Err(err) => return Err(err),
_ => {}
Err(err) => {
let args_string = args.join(" ");
let msg = format!("Error on command: kubectl {}. {:?}", args_string, &err);
error!("{}", &msg);
return Err(err);
}
Ok(_) => {}
};
Ok(())
@@ -703,22 +709,22 @@ pub fn kubectl_exec_get_daemonset<P>(
namespace: &str,
selectors: Option<&str>,
envs: Vec<(&str, &str)>,
) -> Result<KubernetesList<KubernetesNode>, SimpleError>
) -> Result<Daemonset, SimpleError>
where
P: AsRef<Path>,
{
let mut args = vec!["-n", namespace, "get", "daemonset", name];
let mut args = vec!["-n", namespace, "get", "daemonset"];
match selectors {
Some(x) => {
args.push("-l");
args.push(x);
}
None => {}
None => args.push(name),
};
args.push("-o");
args.push("json");
kubectl_exec::<P, KubernetesList<KubernetesNode>>(args, kubernetes_config, envs)
kubectl_exec::<P, Daemonset>(args, kubernetes_config, envs)
}
pub fn kubectl_exec_rollout_restart_deployment<P>(
@@ -931,8 +937,8 @@ where
let mut output_vec: Vec<String> = Vec::with_capacity(50);
let _ = kubectl_exec_with_output(
args,
_envs,
args.clone(),
_envs.clone(),
|out| match out {
Ok(line) => output_vec.push(line),
Err(err) => error!("{:?}", err),
@@ -948,7 +954,20 @@ where
let result = match serde_json::from_str::<T>(output_string.as_str()) {
Ok(x) => x,
Err(err) => {
error!("{:?}", err);
let args_string = args.join(" ");
let mut env_vars_in_vec = Vec::new();
let _ = _envs.into_iter().map(|x| {
env_vars_in_vec.push(x.0.to_string());
env_vars_in_vec.push(x.1.to_string());
});
let environment_variables = env_vars_in_vec.join(" ");
error!(
"json parsing error on {:?} on command: {} kubectl {}. {:?}",
std::any::type_name::<T>(),
environment_variables,
args_string,
err
);
error!("{}", output_string.as_str());
return Err(SimpleError::new(SimpleErrorKind::Other, Some(output_string)));
}

View File

@@ -12,29 +12,11 @@ pub struct KubernetesService {
pub status: KubernetesServiceStatus,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Labels {
pub name: String,
}
pub struct LabelsContent {
pub name: String,
pub value: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Spec {
pub finalizers: Vec<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Status {
pub phase: String,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata2 {
@@ -48,15 +30,12 @@ pub struct Item {
pub api_version: String,
pub kind: String,
pub metadata: Metadata,
pub spec: Spec,
pub status: Status,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
pub creation_timestamp: String,
pub labels: Option<Labels>,
pub name: String,
pub resource_version: String,
pub self_link: String,
@@ -69,7 +48,6 @@ pub struct Daemonset {
pub api_version: String,
pub items: Vec<Item>,
pub kind: String,
pub metadata: Metadata,
}
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]