Merge branch 'dev' into dumb_PR

This commit is contained in:
Erèbe - Romain Gerard
2022-05-10 11:03:05 +02:00
committed by GitHub
378 changed files with 23210 additions and 20266 deletions

15
.github/workflows/charts.yaml vendored Normal file
View File

@@ -0,0 +1,15 @@
name: release-chart
on:
push:
branches: ['dev', 'main']
jobs:
release-chart:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_dir: lib/common/bootstrap/charts/qovery/

View File

@@ -17,30 +17,34 @@ jobs:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
toolchain: 1.60.0
components: rustfmt, clippy
- uses: hashicorp/setup-terraform@v1
with:
terraform_version: 0.14.10
- uses: sergeysova/jq-action@v2
id: sccache_release
with:
cmd: 'curl --silent "https://api.github.com/repos/Qovery/sccache-bin/releases/latest" | jq .tag_name'
multiline: false
- name: build-linter-utests
run: |
echo "########## LINTER ##########"
cargo fmt --all -- --check --color=always || (echo "Use cargo fmt to format your code"; exit 1)
rustup component add clippy
cargo clippy --locked --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1)
cargo clippy --all --all-features --lib -- -D warnings || (echo "Solve your clippy warnings to continue"; exit 1)
export PATH=$GITHUB_WORKSPACE/bin:$PATH
export RUSTC_WRAPPER=$GITHUB_WORKSPACE/bin/sccache
export SCCACHE_REDIS=${{ secrets.SCCACHE_REDIS }}
export TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache
mkdir -p $GITHUB_WORKSPACE/bin $HOME/.terraform.d/plugin-cache
sccache_release=$(curl --silent "https://github.com/Qovery/sccache-bin/releases/latest" | sed -r 's/^.+tag\/(.+)">.+/\1/')
curl -sLo $GITHUB_WORKSPACE/bin/sccache https://github.com/Qovery/sccache-bin/releases/download/${sccache_release}/sccache
curl -sLo $GITHUB_WORKSPACE/bin/sccache https://github.com/Qovery/sccache-bin/releases/download/${{ steps.sccache_release.outputs.value }}/sccache
chmod 755 $GITHUB_WORKSPACE/bin/sccache
echo "########## SHARED CACHE STATUS ##########"
sccache --version
sccache --show-stats
echo "########## START BUILD ##########"
cargo build --all-features
cargo build --all-features --tests
sccache --show-stats
echo "########## START UNIT TESTS ##########"
cargo test

34
Cargo.lock generated
View File

@@ -493,6 +493,17 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
[[package]]
name = "derivative"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
dependencies = [
"proc-macro2 1.0.28",
"quote 1.0.9",
"syn 1.0.74",
]
[[package]]
name = "deunicode"
version = "0.4.3"
@@ -2076,6 +2087,7 @@ dependencies = [
"base64 0.13.0",
"chrono",
"cmd_lib",
"derivative",
"digitalocean",
"dirs",
"flate2",
@@ -2103,6 +2115,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"serde_yaml",
"strum",
"strum_macros",
"sysinfo",
@@ -2953,6 +2966,18 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_yaml"
version = "0.8.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0"
dependencies = [
"indexmap",
"ryu",
"serde",
"yaml-rust",
]
[[package]]
name = "sha-1"
version = "0.8.2"
@@ -4199,6 +4224,15 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3"
[[package]]
name = "yaml-rust"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
dependencies = [
"linked-hash-map",
]
[[package]]
name = "zeroize"
version = "1.4.1"

View File

@@ -8,75 +8,78 @@ edition = "2018"
[dependencies]
chrono = "0.4.19"
cmd_lib = "1.0.13"
git2 = "0.14.2"
cmd_lib = "1.3.0"
derivative = "2.2.0"
git2 = "0.14.3"
walkdir = "2.3.2"
itertools = "0.10.0"
itertools = "0.10.3"
base64 = "0.13.0"
dirs = "3.0.2"
dirs = "4.0.0"
rust-crypto = "0.2.36"
retry = "1.2.1"
trust-dns-resolver = "0.20.3"
rand = "0.8.3"
semver = "1.0.4"
gethostname = "0.2.1"
reqwest = { version = "0.11.3", features = ["blocking", "json"] }
futures = "0.3.15"
retry = "1.3.1"
trust-dns-resolver = "0.21.2"
rand = "0.8.5"
semver = "1.0.9"
gethostname = "0.2.3"
reqwest = { version = "0.11.10", features = ["blocking", "json"] }
futures = "0.3.21"
timeout-readwrite = "0.3.1"
lazy_static = "1.4.0"
uuid = { version = "0.8", features = ["v4", "serde"] }
uuid = { version = "1.0.0", features = ["v4", "serde"] }
url = "2.2.2"
function_name = "0.2.0"
thiserror = "1.0.30"
strum = "0.23"
strum_macros = "0.23"
function_name = "0.2.1"
thiserror = "1.0.31"
strum = "0.24.0"
strum_macros = "0.24.0"
urlencoding = "2.1.0"
# FIXME use https://crates.io/crates/blocking instead of runtime.rs
# tar gz
flate2 = "1.0.20" # tar gz
tar = ">=0.4.36"
flate2 = "1.0.23"
tar = "0.4.38"
# logger
tracing = "0.1.26"
tracing-subscriber = "0.2.18"
tracing = "0.1.34"
tracing-subscriber = "0.3.11"
# Docker deps
# shiplift = "0.6.0"
# Filesystem
sysinfo = "0.18.2"
sysinfo = "0.23.11"
# Jinja2
tera = "1.10.0"
tera = "1.15.0"
# Json
serde = "1.0.126"
serde_json = "1.0.64"
serde_derive = "1.0.126"
serde = "1.0.137"
serde_json = "1.0.81"
serde_derive = "1.0.137"
serde_yaml = "0.8.24"
# AWS deps
tokio = { version = "1.10.0", features = ["full"] }
rusoto_core = "0.47.0"
rusoto_sts = "0.47.0"
rusoto_credential = "0.47.0"
rusoto_ecr = "0.47.0"
rusoto_eks = "0.47.0"
rusoto_s3 = "0.47.0"
rusoto_dynamodb = "0.47.0"
rusoto_iam = "0.47.0"
tokio = { version = "1.18.1", features = ["full"] }
rusoto_core = "0.48.0"
rusoto_sts = "0.48.0"
rusoto_credential = "0.48.0"
rusoto_ecr = "0.48.0"
rusoto_eks = "0.48.0"
rusoto_s3 = "0.48.0"
rusoto_dynamodb = "0.48.0"
rusoto_iam = "0.48.0"
# Digital Ocean Deps
digitalocean = "0.1.1"
# Scaleway Deps
scaleway_api_rs = "=0.1.2"
scaleway_api_rs = "0.1.2"
[dev-dependencies]
test-utilities = { path = "test_utilities" }
tempdir = "0.3"
tempfile = "3.2.0"
tempdir = "0.3.7"
tempfile = "3.3.0"
maplit = "1.0.2"
tracing-test = "0.1.0"
tracing-test = "0.2.1"
[features]
default = []
@@ -100,9 +103,10 @@ test-scw-managed-services = []
test-all-managed-services = ["test-aws-managed-services", "test-do-managed-services", "test-scw-managed-services"]
test-aws-infra = []
test-aws-infra-ec2 = []
test-do-infra = []
test-scw-infra = []
test-all-infra = ["test-aws-infra", "test-do-infra", "test-scw-infra"]
test-all-infra = ["test-aws-infra", "test-aws-infra-ec2", "test-do-infra", "test-scw-infra"]
test-aws-whole-enchilada = []
test-do-whole-enchilada = []
@@ -110,7 +114,7 @@ test-scw-whole-enchilada = []
test-all-whole-enchilada = ["test-aws-whole-enchilada", "test-do-whole-enchilada", "test-scw-whole-enchilada"]
# functionnal tests by provider
test-aws-all = ["test-aws-infra", "test-aws-managed-services", "test-aws-self-hosted", "test-aws-whole-enchilada"]
test-aws-all = ["test-aws-infra", "test-aws-infra-ec2", "test-aws-managed-services", "test-aws-self-hosted", "test-aws-whole-enchilada"]
test-do-all = ["test-do-infra", "test-do-managed-services", "test-do-self-hosted", "test-do-whole-enchilada"]
test-scw-all = ["test-scw-infra", "test-scw-managed-services", "test-scw-self-hosted", "test-scw-whole-enchilada"]

View File

@@ -0,0 +1,10 @@
terraform {
backend "s3" {
access_key = "{{ aws_access_key_tfstates_account }}"
secret_key = "{{ aws_secret_key_tfstates_account }}"
bucket = "{{ aws_terraform_backend_bucket }}"
key = "{{ kubernetes_cluster_id }}/{{ aws_terraform_backend_bucket }}.tfstate"
dynamodb_table = "{{ aws_terraform_backend_dynamodb_table }}"
region = "{{ aws_region_tfstates_account }}"
}
}

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: coredns-config
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: 0.1

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "coredns-config.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "coredns-config.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "coredns-config.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "coredns-config.labels" -}}
helm.sh/chart: {{ include "coredns-config.chart" . }}
{{ include "coredns-config.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "coredns-config.selectorLabels" -}}
app.kubernetes.io/name: {{ include "coredns-config.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "coredns-config.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "coredns-config.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,31 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: coredns
namespace: kube-system
labels:
eks.amazonaws.com/component: coredns
k8s-app: kube-dns
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
{{- range .Values.managed_dns }}
{{ . }}:53 {
errors
cache 30
forward . {{ join " " $.Values.managed_dns_resolvers }}
}
{{ end }}

View File

@@ -0,0 +1,4 @@
# List of managed DNS
managed_dns: []
# List of resolvers
managed_dns_resolvers: []

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: q-storageclass
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.1

View File

@@ -0,0 +1,63 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "q-ebs-csi-config.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "q-ebs-csi-config.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "q-ebs-csi-config.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "q-ebs-csi-config.labels" -}}
helm.sh/chart: {{ include "q-ebs-csi-config.chart" . }}
{{ include "q-ebs-csi-config.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "q-ebs-csi-config.selectorLabels" -}}
app.kubernetes.io/name: {{ include "q-ebs-csi-config.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "q-ebs-csi-config.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "q-ebs-csi-config.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,64 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: aws-ebs-gp2-0
labels:
aws-type: "gp2"
qovery-type: "ssd"
reclaim: "0"
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
encrypted: 'true'
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Delete
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: aws-ebs-io1-0
labels:
aws-type: "io1"
qovery-type: "nvme"
reclaim: "0"
provisioner: kubernetes.io/aws-ebs
parameters:
type: io1
iopsPerGB: "32"
encrypted: 'true'
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Delete
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: aws-ebs-st1-0
labels:
aws-type: "st1"
qovery-type: "hdd"
reclaim: "0"
provisioner: kubernetes.io/aws-ebs
parameters:
type: st1
encrypted: 'true'
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Delete
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: aws-ebs-sc1-0
labels:
aws-type: "sc1"
qovery-type: "cold"
reclaim: "0"
provisioner: kubernetes.io/aws-ebs
parameters:
type: sc1
encrypted: 'true'
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Delete

View File

@@ -0,0 +1,69 @@
locals {
tags_documentdb = merge(
aws_instance.ec2_instance.tags,
{
"Service" = "DocumentDB"
}
)
}
# Network
resource "aws_subnet" "documentdb_zone_a" {
count = length(var.documentdb_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.documentdb_subnets_zone_a[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_documentdb
}
resource "aws_subnet" "documentdb_zone_b" {
count = length(var.documentdb_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.documentdb_subnets_zone_b[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_documentdb
}
resource "aws_subnet" "documentdb_zone_c" {
count = length(var.documentdb_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.documentdb_subnets_zone_c[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_documentdb
}
resource "aws_route_table_association" "documentdb_cluster_zone_a" {
count = length(var.documentdb_subnets_zone_a)
subnet_id = aws_subnet.documentdb_zone_a.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "documentdb_cluster_zone_b" {
count = length(var.documentdb_subnets_zone_b)
subnet_id = aws_subnet.documentdb_zone_b.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "documentdb_cluster_zone_c" {
count = length(var.documentdb_subnets_zone_c)
subnet_id = aws_subnet.documentdb_zone_c.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_docdb_subnet_group" "documentdb" {
description = "DocumentDB linked to ${var.kubernetes_cluster_id}"
name = "documentdb-${aws_vpc.ec2.id}"
subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id])
tags = local.tags_documentdb
}

View File

@@ -0,0 +1,38 @@
# randomize inbound kubernetes port number for more security
resource "random_integer" "kubernetes_external_port" {
min = 1024
max = 65534
}
resource "aws_security_group" "ec2_instance" {
name = "qovery-ec2-${var.kubernetes_cluster_id}"
description = "Cluster communication with worker nodes"
vpc_id = aws_vpc.ec2.id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
// nginx ingress
ingress {
description = "HTTPS connectivity"
from_port = 443
protocol = "tcp"
to_port = 443
cidr_blocks = ["0.0.0.0/0"]
}
// kubernetes
ingress {
description = "Kubernetes connectivity"
from_port = random_integer.kubernetes_external_port.result
protocol = "tcp"
to_port = random_integer.kubernetes_external_port.result
cidr_blocks = ["0.0.0.0/0"]
}
tags = local.tags_ec2
}

View File

@@ -0,0 +1,35 @@
data "aws_availability_zones" "available" {}
locals {
tags_ec2_vpc = merge(
local.tags_common,
{
Name = "qovery-ec2-workers",
"kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared",
"kubernetes.io/role/elb" = 1,
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %}
}
)
tags_ec2_vpc_public = merge(
local.tags_ec2_vpc,
{
"Public" = "true"
}
)
}
# VPC
resource "aws_vpc" "ec2" {
cidr_block = var.vpc_cidr_block
enable_dns_hostnames = true
tags = local.tags_ec2_vpc
}
# Internet gateway
resource "aws_internet_gateway" "ec2_instance" {
vpc_id = aws_vpc.ec2.id
tags = local.tags_ec2_vpc
}

View File

@@ -0,0 +1,72 @@
# Public subnets
resource "aws_subnet" "ec2_zone_a" {
count = length(var.ec2_subnets_zone_a_private)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.ec2_subnets_zone_a_private[count.index]
vpc_id = aws_vpc.ec2.id
map_public_ip_on_launch = true
tags = local.tags_ec2_vpc
}
resource "aws_subnet" "ec2_zone_b" {
count = length(var.ec2_subnets_zone_b_private)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.ec2_subnets_zone_b_private[count.index]
vpc_id = aws_vpc.ec2.id
map_public_ip_on_launch = true
tags = local.tags_ec2_vpc
}
resource "aws_subnet" "ec2_zone_c" {
count = length(var.ec2_subnets_zone_c_private)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.ec2_subnets_zone_c_private[count.index]
vpc_id = aws_vpc.ec2.id
map_public_ip_on_launch = true
tags = local.tags_ec2_vpc
}
resource "aws_route_table" "ec2_instance" {
vpc_id = aws_vpc.ec2.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.ec2_instance.id
}
{% for route in vpc_custom_routing_table %}
route {
cidr_block = "{{ route.destination }}"
gateway_id = "{{ route.target }}"
}
{% endfor %}
tags = local.tags_ec2_vpc
}
resource "aws_route_table_association" "ec2_instance_zone_a" {
count = length(var.ec2_subnets_zone_a_private)
subnet_id = aws_subnet.ec2_zone_a.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "ec2_instance_zone_b" {
count = length(var.ec2_subnets_zone_b_private)
subnet_id = aws_subnet.ec2_zone_b.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "ec2_instance_zone_c" {
count = length(var.ec2_subnets_zone_c_private)
subnet_id = aws_subnet.ec2_zone_c.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}

View File

@@ -0,0 +1,87 @@
data "aws_ami" "debian" {
most_recent = true
filter {
name = "name"
values = [var.ec2_image_info.name]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
# to get owner id:
# aws ec2 describe-images --image-ids <ami-id> --region us-west-2 | jq -r '.Images[0].OwnerId'
owners = [var.ec2_image_info.owners]
}
resource "aws_instance" "ec2_instance" {
ami = data.aws_ami.debian.id
instance_type = var.ec2_instance.instance_type
# disk
root_block_device {
volume_size = "30" # GiB
volume_type = "gp2"
encrypted = true
}
# network
associate_public_ip_address = true
# security
vpc_security_group_ids = [aws_security_group.ec2_instance.id]
subnet_id = aws_subnet.ec2_zone_a[0].id
user_data = local.bootstrap
user_data_replace_on_change = true
# lifecycle {
# // user data changes, forces to restart the EC2 instance
# ignore_changes = [user_data]
# }
tags = merge(
local.tags_common,
{
"Service" = "EC2"
}
)
depends_on = [
aws_s3_bucket.kubeconfigs_bucket
]
}
resource "time_static" "on_ec2_create" {}
locals {
bootstrap = <<BOOTSTRAP
#!/bin/bash
export KUBECONFIG_FILENAME="${var.kubernetes_cluster_id}.yaml"
export NEW_KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME"
apt-get update
apt-get -y install curl s3cmd
export INSTALL_K3S_VERSION=${var.k3s_config.version}
export INSTALL_K3S_CHANNEL=${var.k3s_config.channel}
export INSTALL_K3S_EXEC="--https-listen-port=${random_integer.kubernetes_external_port.result} ${var.k3s_config.exec}"
curl -sfL https://get.k3s.io | sh -
echo 'export KUBECONFIG=/etc/rancher/k3s/k3s.yaml' >> /etc/profile
while [ ! -f /etc/rancher/k3s/k3s.yaml ] ; do
echo "kubeconfig is not yet present, sleeping"
sleep 1
done
# Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap
public_hostname="$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)"
sed "s/127.0.0.1/$public_hostname/g" /etc/rancher/k3s/k3s.yaml > $NEW_KUBECONFIG_PATH
sed -i "s/:6443/:${random_integer.kubernetes_external_port.result}/g" $NEW_KUBECONFIG_PATH
s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $NEW_KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME
rm -f $NEW_KUBECONFIG_PATH
BOOTSTRAP
}

View File

@@ -0,0 +1,68 @@
locals {
tags_elasticache = merge(
aws_instance.ec2_instance.tags,
{
"Service" = "Elasticache"
}
)
}
# Network
resource "aws_subnet" "elasticache_zone_a" {
count = length(var.elasticache_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.elasticache_subnets_zone_a[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticache
}
resource "aws_subnet" "elasticache_zone_b" {
count = length(var.elasticache_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.elasticache_subnets_zone_b[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticache
}
resource "aws_subnet" "elasticache_zone_c" {
count = length(var.elasticache_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.elasticache_subnets_zone_c[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticache
}
resource "aws_route_table_association" "elasticache_cluster_zone_a" {
count = length(var.elasticache_subnets_zone_a)
subnet_id = aws_subnet.elasticache_zone_a.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "elasticache_cluster_zone_b" {
count = length(var.elasticache_subnets_zone_b)
subnet_id = aws_subnet.elasticache_zone_b.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "elasticache_cluster_zone_c" {
count = length(var.elasticache_subnets_zone_c)
subnet_id = aws_subnet.elasticache_zone_c.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_elasticache_subnet_group" "elasticache" {
description = "Elasticache linked to ${var.kubernetes_cluster_id}"
# WARNING: this "name" value is used into elasticache clusters, you need to update it accordingly
name = "elasticache-${aws_vpc.ec2.id}"
subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id])
}

View File

@@ -0,0 +1,79 @@
locals {
tags_elasticsearch = merge(
local.tags_ec2,
{
"Service" = "Elasticsearch"
}
)
}
# Network
resource "aws_subnet" "elasticsearch_zone_a" {
count = length(var.elasticsearch_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.elasticsearch_subnets_zone_a[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticsearch
}
resource "aws_subnet" "elasticsearch_zone_b" {
count = length(var.elasticsearch_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.elasticsearch_subnets_zone_b[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticsearch
}
resource "aws_subnet" "elasticsearch_zone_c" {
count = length(var.elasticsearch_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.elasticsearch_subnets_zone_c[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_elasticsearch
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_a" {
count = length(var.elasticsearch_subnets_zone_a)
subnet_id = aws_subnet.elasticsearch_zone_a.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_b" {
count = length(var.elasticsearch_subnets_zone_b)
subnet_id = aws_subnet.elasticsearch_zone_b.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "elasticsearch_cluster_zone_c" {
count = length(var.elasticsearch_subnets_zone_c)
subnet_id = aws_subnet.elasticsearch_zone_c.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_security_group" "elasticsearch" {
name = "elasticsearch-${var.kubernetes_cluster_id}"
description = "Elasticsearch security group"
vpc_id = aws_vpc.ec2.id
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [
aws_vpc.ec2.cidr_block
]
}
tags = local.tags_elasticsearch
}

View File

@@ -0,0 +1,14 @@
locals {
qovery_tf_config = <<TF_CONFIG
{
"aws_ec2_public_hostname": "${aws_instance.ec2_instance.public_dns}",
"aws_ec2_kubernetes_port": "${random_integer.kubernetes_external_port.result}"
}
TF_CONFIG
}
resource "local_file" "qovery_tf_config" {
filename = "qovery-tf-config.json"
content = local.qovery_tf_config
file_permission = "0600"
}

View File

@@ -0,0 +1,30 @@
// do not run for tests clusters to avoid uncleaned info.
// do not try to use count into resource, it will fails trying to connect to vault
{% if vault_auth_method != "none" and not test_cluster %}
resource "vault_generic_secret" "cluster-access" {
path = "official-clusters-access/${var.organization_id}-${var.kubernetes_cluster_id}"
data_json = <<EOT
{
"cloud_provider": "${var.cloud_provider}",
"cluster_name": "${var.kubernetes_cluster_name}",
"organization_id": "${var.organization_id}",
"test_cluster": "${var.test_cluster}",
"grafana_login": "{{ grafana_admin_user }}",
"grafana_password": "{{ grafana_admin_password }}",
"AWS_ACCESS_KEY_ID": "{{ aws_access_key }}",
"AWS_SECRET_ACCESS_KEY": "{{ aws_secret_key }}",
"AWS_DEFAULT_REGION": "{{ aws_region }}"
}
EOT
# TODO: add kubeconfig content to vault
# "KUBECONFIG_b64": "${local.kubeconfig_base64}",
# locals {
# kubeconfig_base64 = base64encode(local.kubeconfig)
# }
depends_on = [
aws_instance.ec2_instance,
]
}
{% endif %}

View File

@@ -0,0 +1,96 @@
data "aws_iam_policy_document" "rds_enhanced_monitoring" {
statement {
actions = [
"sts:AssumeRole",
]
effect = "Allow"
principals {
type = "Service"
identifiers = ["monitoring.rds.amazonaws.com"]
}
}
}
locals {
tags_rds = merge(
aws_instance.ec2_instance.tags,
{
"Service" = "RDS"
}
)
}
# Network
resource "aws_subnet" "rds_zone_a" {
count = length(var.rds_subnets_zone_a)
availability_zone = var.aws_availability_zones[0]
cidr_block = var.rds_subnets_zone_a[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_rds
}
resource "aws_subnet" "rds_zone_b" {
count = length(var.rds_subnets_zone_b)
availability_zone = var.aws_availability_zones[1]
cidr_block = var.rds_subnets_zone_b[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_rds
}
resource "aws_subnet" "rds_zone_c" {
count = length(var.rds_subnets_zone_c)
availability_zone = var.aws_availability_zones[2]
cidr_block = var.rds_subnets_zone_c[count.index]
vpc_id = aws_vpc.ec2.id
tags = local.tags_rds
}
resource "aws_route_table_association" "rds_cluster_zone_a" {
count = length(var.rds_subnets_zone_a)
subnet_id = aws_subnet.rds_zone_a.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "rds_cluster_zone_b" {
count = length(var.rds_subnets_zone_b)
subnet_id = aws_subnet.rds_zone_b.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_route_table_association" "rds_cluster_zone_c" {
count = length(var.rds_subnets_zone_c)
subnet_id = aws_subnet.rds_zone_c.*.id[count.index]
route_table_id = aws_route_table.ec2_instance.id
}
resource "aws_db_subnet_group" "rds" {
description = "RDS linked to ${var.kubernetes_cluster_id}"
name = aws_vpc.ec2.id
subnet_ids = flatten([aws_subnet.rds_zone_a.*.id, aws_subnet.rds_zone_b.*.id, aws_subnet.rds_zone_c.*.id])
tags = local.tags_rds
}
# IAM
resource "aws_iam_role" "rds_enhanced_monitoring" {
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}"
assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json
tags = local.tags_rds
}
resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" {
role = aws_iam_role.rds_enhanced_monitoring.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
}

View File

@@ -0,0 +1,53 @@
// S3 bucket to store kubeconfigs
resource "aws_s3_bucket" "kubeconfigs_bucket" {
bucket = var.s3_bucket_kubeconfig
force_destroy = true
tags = merge(
local.tags_ec2,
{
"Name" = "Kubernetes kubeconfig"
}
)
}
resource "aws_s3_bucket_acl" "kubeconfigs_bucket_acl" {
bucket = aws_s3_bucket.kubeconfigs_bucket.id
acl = "private"
}
resource "aws_s3_bucket_server_side_encryption_configuration" "kubeconfigs_bucket_encryption" {
bucket = aws_s3_bucket.kubeconfigs_bucket.id
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.s3_kubeconfig_kms_encryption.arn
sse_algorithm = "aws:kms"
}
}
}
resource "aws_s3_bucket_versioning" "kubeconfigs_bucket_versionning" {
bucket = aws_s3_bucket.kubeconfigs_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_kms_key" "s3_kubeconfig_kms_encryption" {
description = "s3 kubeconfig encryption"
tags = merge(
local.tags_ec2,
{
"Name" = "Kubeconfig Encryption"
}
)
}
resource "aws_s3_bucket_public_access_block" "kubeconfigs_access" {
bucket = aws_s3_bucket.kubeconfigs_bucket.id
ignore_public_acls = true
restrict_public_buckets = true
block_public_policy = true
block_public_acls = true
}

View File

@@ -0,0 +1,17 @@
locals {
tags_common = {
ClusterId = var.kubernetes_cluster_id
ClusterLongId = var.kubernetes_full_cluster_id
OrganizationId = var.organization_id,
Region = var.region
creationDate = time_static.on_ec2_create.rfc3339
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %}
}
tags_ec2 = merge(
local.tags_common,
{
"Service" = "EC2"
}
)
}

View File

@@ -0,0 +1,315 @@
# Qovery
variable "cloud_provider" {
description = "Cloud provider name"
default = "aws"
type = string
}
variable "region" {
description = "AWS region to store terraform state and lock"
default = "{{ aws_region }}"
type = string
}
variable "organization_id" {
description = "Qovery Organization ID"
default = "{{ organization_id }}"
type = string
}
variable "qovery_nats_url" {
description = "URL of qovery nats server"
default = "{{ qovery_nats_url }}"
type = string
}
variable "qovery_nats_user" {
description = "user of qovery nats server"
default = "{{ qovery_nats_user }}"
type = string
}
variable "qovery_nats_password" {
description = "password of qovery nats server"
default = "{{ qovery_nats_password }}"
type = string
}
variable "test_cluster" {
description = "Is this a test cluster?"
default = "{{ test_cluster }}"
type = string
}
# AWS specific
variable "aws_availability_zones" {
description = "AWS availability zones"
default = {{ aws_availability_zones }}
type = list(string)
}
variable "vpc_cidr_block" {
description = "VPC CIDR block"
default = "{{ vpc_cidr_block }}"
type = string
}
# ec2
variable "ec2_image_info" {
description = "EC2 image information"
default = {
"name" = "debian-10-amd64*"
"owners" = "136693071363"
}
type = map(string)
}
variable "ec2_instance" {
description = "EC2 instance configuration"
default = {
"instance_type" = "t3.micro"
}
type = map(string)
}
variable "k3s_config" {
description = "K3s configuration"
default = {
"version" = "v1.20.15+k3s1"
"channel" = "stable"
"exec" = "--disable=traefik"
}
type = map(string)
}
variable "ec2_subnets_zone_a_private" {
description = "EC2 private subnets Zone A"
default = {{ ec2_zone_a_subnet_blocks_private }}
type = list(string)
}
variable "ec2_subnets_zone_b_private" {
description = "EC2 private subnets Zone B"
default = {{ ec2_zone_b_subnet_blocks_private }}
type = list(string)
}
variable "ec2_subnets_zone_c_private" {
description = "EC2 private subnets Zone C"
default = {{ ec2_zone_c_subnet_blocks_private }}
type = list(string)
}
{% if vpc_qovery_network_mode == "WithNatGateways" %}
variable "ec2_subnets_zone_a_public" {
description = "EC2 public subnets Zone A"
default = {{ ec2_zone_a_subnet_blocks_public }}
type = list(string)
}
variable "ec2_subnets_zone_b_public" {
description = "EC2 public subnets Zone B"
default = {{ ec2_zone_b_subnet_blocks_public }}
type = list(string)
}
variable "ec2_subnets_zone_c_public" {
description = "EC2 public subnets Zone C"
default = {{ ec2_zone_c_subnet_blocks_public }}
type = list(string)
}
{% endif %}
variable "ec2_cidr_subnet" {
description = "EC2 CIDR (x.x.x.x/CIDR)"
default = {{ ec2_cidr_subnet }}
type = number
}
variable "ec2_k8s_versions" {
description = "Kubernetes version"
default = {
"masters": "{{ ec2_masters_version }}",
"workers": "{{ ec2_workers_version }}",
}
type = map(string)
}
variable "kubernetes_full_cluster_id" {
description = "Kubernetes full cluster id"
default = "{{ kubernetes_full_cluster_id }}"
type = string
}
variable "kubernetes_cluster_id" {
description = "Kubernetes cluster id"
default = "{{ kubernetes_cluster_id }}"
type = string
}
variable "kubernetes_cluster_name" {
description = "Kubernetes cluster name"
default = "qovery-{{ kubernetes_cluster_id }}"
type = string
}
variable "ec2_access_cidr_blocks" {
description = "Kubernetes CIDR Block"
default = {{ ec2_access_cidr_blocks }}
type = list(string)
}
# S3 bucket name
variable "s3_bucket_kubeconfig" {
description = "S3 bucket containing kubeconfigs"
default = "{{ s3_kubeconfig_bucket }}"
type = string
}
# Agent info
variable "qovery_agent_info" {
description = "Qovery agent info"
default = {
"token" = "{{ agent_version_controller_token }}"
"api_fqdn" = "{{ qovery_api_url }}"
}
type = map(string)
}
# Number of Qovery agent replicas to deploy.
# Fixes a copy-pasted description that wrongly claimed this variable was
# "used to get random ID generated for the agent", and makes the default a
# real number to match the declared type (Terraform coerced "1" anyway, so
# the effective value is unchanged).
variable "qovery_agent_replicas" {
  description = "Number of Qovery agent replicas"
  default     = 1
  type        = number
}
# RDS
variable "rds_subnets_zone_a" {
description = "RDS subnets Zone A"
default = {{ rds_zone_a_subnet_blocks }}
type = list(string)
}
variable "rds_subnets_zone_b" {
description = "RDS subnets Zone B"
default = {{ rds_zone_b_subnet_blocks }}
type = list(string)
}
variable "rds_subnets_zone_c" {
description = "RDS subnets Zone C"
default = {{ rds_zone_c_subnet_blocks }}
type = list(string)
}
variable "rds_cidr_subnet" {
description = "RDS CIDR (x.x.x.x/CIDR)"
default = {{ rds_cidr_subnet }}
type = number
}
# DocumentDB
variable "documentdb_subnets_zone_a" {
description = "DocumentDB subnets Zone A"
default = {{ documentdb_zone_a_subnet_blocks }}
type = list(string)
}
variable "documentdb_subnets_zone_b" {
description = "DocumentDB subnets Zone B"
default = {{ documentdb_zone_b_subnet_blocks }}
type = list(string)
}
variable "documentdb_subnets_zone_c" {
description = "DocumentDB subnets Zone C"
default = {{ documentdb_zone_c_subnet_blocks }}
type = list(string)
}
variable "documentdb_cidr_subnet" {
description = "DocumentDB CIDR (x.x.x.x/CIDR)"
default = {{ documentdb_cidr_subnet }}
type = number
}
# Elasticache
variable "elasticache_subnets_zone_a" {
description = "Elasticache subnets Zone A"
default = {{ elasticache_zone_a_subnet_blocks }}
type = list(string)
}
variable "elasticache_subnets_zone_b" {
description = "Elasticache subnets Zone B"
default = {{ elasticache_zone_b_subnet_blocks }}
type = list(string)
}
variable "elasticache_subnets_zone_c" {
description = "Elasticache subnets Zone C"
default = {{ elasticache_zone_c_subnet_blocks }}
type = list(string)
}
variable "elasticache_cidr_subnet" {
description = "Elasticache CIDR (x.x.x.x/CIDR)"
default = {{ elasticache_cidr_subnet }}
type = number
}
# Elasticsearch
variable "elasticsearch_subnets_zone_a" {
description = "Elasticsearch subnets Zone A"
default = {{ elasticsearch_zone_a_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_subnets_zone_b" {
description = "Elasticsearch subnets Zone B"
default = {{ elasticsearch_zone_b_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_subnets_zone_c" {
description = "Elasticsearch subnets Zone C"
default = {{ elasticsearch_zone_c_subnet_blocks }}
type = list(string)
}
variable "elasticsearch_cidr_subnet" {
description = "Elasticsearch CIDR (x.x.x.x/CIDR)"
default = {{ elasticsearch_cidr_subnet }}
type = number
}
# Qovery features
variable "log_history_enabled" {
description = "Enable log history"
default = {{ log_history_enabled }}
type = bool
}
variable "metrics_history_enabled" {
description = "Enable metrics history"
default = {{ metrics_history_enabled }}
type = bool
}
{%- if resource_expiration_in_seconds is defined %}
# Pleco ttl
variable "resource_expiration_in_seconds" {
description = "Resource expiration in seconds"
default = {{ resource_expiration_in_seconds }}
type = number
}
{% endif %}

View File

@@ -0,0 +1,60 @@
# Providers required by this template. Versions are pinned with pessimistic
# (~>) constraints so patch releases are picked up automatically but major
# upgrades are not.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.12.1"
    }
    external = {
      source  = "hashicorp/external"
      version = "~> 2.2"
    }
    vault = {
      source  = "hashicorp/vault"
      version = "~> 3.5"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.2"
    }
    null = {
      source  = "hashicorp/null"
      version = "~> 3.1"
    }
    random = {
      source  = "hashicorp/random"
      version = "~> 3.1"
    }
    time = {
      source  = "hashicorp/time"
      version = "~> 0.7"
    }
  }
  required_version = ">= 0.13"
}

# Default AWS provider: the account hosting the cluster. Credentials are
# injected at template-render time.
# NOTE(review): static access_key/secret_key take precedence over the
# shared-config profile, so the `profile = "default"` line is effectively
# inert — confirm it can be removed.
provider "aws" {
  profile    = "default"
  access_key = "{{ aws_access_key }}"
  secret_key = "{{ aws_secret_key }}"
  region     = "{{ aws_region }}"
}

# Aliased AWS provider used only for the remote tfstates account, which may
# be a different account/region than the cluster itself.
provider "aws" {
  alias      = "tfstates"
  access_key = "{{ aws_access_key_tfstates_account }}"
  secret_key = "{{ aws_secret_key_tfstates_account }}"
  region     = "{{ aws_region_tfstates_account }}"
}

# Vault provider. The AppRole login block is only rendered for real
# (non-test) clusters configured with the app_role auth method; otherwise
# ambient Vault authentication (env vars/token) is used.
provider "vault" {
  {% if vault_auth_method == "app_role" and not test_cluster %}
  auth_login {
    path = "auth/approle/login"

    parameters = {
      role_id   = "{{ vault_role_id }}"
      secret_id = "{{ vault_secret_id }}"
    }
  }
  {% endif %}
}

View File

@@ -0,0 +1,546 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride
# namespaceOverride: my-global-namespace
image:
## Bitnami MongoDB registry
##
registry: quay.io
## Bitnami MongoDB image name
##
repository: bitnami/mongodb
## Bitnami MongoDB image tag
## ref: https://hub.docker.com/r/bitnami/mongodb/tags/
##
tag: "{{ version }}"
## Specify a imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information on logs
## It turns on Bitnami debugging in minideb-extras-base
## ref: https://github.com/bitnami/minideb-extras-base
debug: true
## String to partially override mongodb.fullname template (will maintain the release name)
## Quoted so the Jinja-rendered value is always parsed as a YAML string,
## even if it is empty or looks like a number/boolean.
##
# nameOverride:
nameOverride: "{{ sanitized_name }}"
## String to fully override mongodb.fullname template
##
# fullnameOverride:
fullnameOverride: "{{ sanitized_name }}"
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Add custom extra environment variables to all the MongoDB containers
# extraEnvVars:
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources:
limits: {}
requests:
cpu: "{{ total_cpus }}"
memory: "{{ total_ram_in_mib }}Mi"
## Enable authentication
## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/
#
usePassword: true
# existingSecret: name-of-existing-secret
## MongoDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run
##
mongodbRootPassword: '{{ database_password }}'
## MongoDB custom user and database
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run
## The database name is quoted (like the credentials above) so a rendered
## value that looks numeric or boolean is still treated as a string.
##
mongodbUsername: '{{ database_login }}'
mongodbPassword: '{{ database_password }}'
mongodbDatabase: '{{ database_db_name }}'
## Whether enable/disable IPv6 on MongoDB
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6
##
mongodbEnableIPv6: false
## Whether enable/disable DirectoryPerDB on MongoDB
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb
##
mongodbDirectoryPerDB: false
## MongoDB System Log configuration
## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level
##
mongodbSystemLogVerbosity: 0
mongodbDisableSystemLog: false
## MongoDB additional command line flags
##
## Can be used to specify command line flags, for example:
##
## mongodbExtraFlags:
## - "--wiredTigerCacheSizeGB=2"
mongodbExtraFlags: []
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Kubernetes Cluster Domain
clusterDomain: cluster.local
## Kubernetes service type
service:
## Specify an explicit service name.
# name: svc-mongo
## Provide any additional annotations which may be required.
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
{% if publicly_accessible -%}
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
{% endif %}
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
# clusterIP: None
port: {{ private_port }}
qovery_name: {{ service_name }}
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Specify the externalIP value ClusterIP service type.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
# externalIPs: []
## Specify the loadBalancerIP value for LoadBalancer service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
##
# loadBalancerIP:
## Specify the loadBalancerSourceRanges value for LoadBalancer service types.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges: []
## Use StatefulSet instead of Deployment when deploying standalone
useStatefulSet: true
## Setting up replication
## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication
#
replicaSet:
## Whether to create a MongoDB replica set for high availability or not
enabled: false
useHostnames: true
## Name of the replica set
##
name: rs0
## Key used for replica set authentication
##
# key: key
## Number of replicas per each node type
##
replicas:
secondary: 1
arbiter: 1
## Pod Disruption Budget
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
pdb:
enabled: true
minAvailable:
secondary: 1
arbiter: 1
# maxUnavailable:
# secondary: 1
# arbiter: 1
# Annotations to be added to the deployment or statefulsets
annotations: {}
# Additional labels to apply to the deployment or statefulsets.
# Values are quoted: Kubernetes label values must be strings, and an
# all-digit rendered id would otherwise be parsed as a YAML integer and
# rejected at apply time.
labels:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ id }}"
  databaseName: "{{ sanitized_name }}"
# Annotations to be added to MongoDB pods
podAnnotations: {}
# Additional pod labels to apply
podLabels:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ id }}"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Define separate resources per arbiter, which are less then primary or secondary
# used only when replica set is enabled
resourcesArbiter: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 256Mi
## Pod priority
## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: ""
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Define Separate nodeSelector for secondaries
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelectorSecondary: {}
## Define Separate nodeSelector for arbiter
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelectorArbiter: {}
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Define separate affinity for arbiter pod
affinityArbiter: {}
## Tolerations
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Array to add extra volumes
##
extraVolumes: []
## Array to add extra mounts (normally used with extraVolumes)
##
extraVolumeMounts: []
## Add sidecars to the arbiter pod
# used only when replica set is enabled
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecarsArbiter: []
## Array to add extra volumes to the arbiter
# used only when replica set is enabled
##
extraVolumesArbiter: []
## Array to add extra mounts (normally used with extraVolumes) to the arbiter
# used only when replica set is enabled
##
extraVolumeMountsArbiter: []
## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
## The path the volume will be mounted at, useful when using different
## MongoDB images.
##
mountPath: /bitnami/mongodb
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
##
subPath: ""
## mongodb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# storageClassSecondary: "-"
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ database_disk_size_in_gib }}Gi
annotations:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ sanitized_name }}
## Configure the ingress resource that allows you to access the
## MongoDB installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: mongodb.local
path: /
## The tls configuration for the ingress
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
tls:
- hosts:
- mongodb.local
secretName: mongodb.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: airflow.local-tls
# key:
# certificate:
## Configure the options for init containers to be run before the main app containers
## are started. All init containers are run sequentially and must exit without errors
## for the next one to be started.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
# extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
# Define custom config map with init scripts
initConfigMap: {}
# name: "init-config-map"
## Entries for the MongoDB config file. For documentation of all options, see:
## http://docs.mongodb.org/manual/reference/configuration-options/
##
configmap:
# # where and how to store data.
# storage:
# dbPath: /bitnami/mongodb/data/db
# journal:
# enabled: true
# directoryPerDB: false
# # where to write logging data.
# systemLog:
# destination: file
# quiet: false
# logAppend: true
# logRotate: reopen
# path: /opt/bitnami/mongodb/logs/mongodb.log
# verbosity: 0
# # network interfaces
# net:
# port: 27017
# unixDomainSocket:
# enabled: true
# pathPrefix: /opt/bitnami/mongodb/tmp
# ipv6: false
# bindIpAll: true
# # replica set options
# #replication:
# #replSetName: replicaset
# #enableMajorityReadConcern: true
# # process management options
# processManagement:
# fork: false
# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid
# # set parameter options
# setParameter:
# enableLocalhostAuthBypass: true
# # security options
# security:
# authorization: disabled
# #keyFile: /opt/bitnami/mongodb/conf/keyfile
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/mongodb-exporter
tag: 0.11.0-debian-10-r45
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String with extra arguments to the metrics exporter
## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go
extraArgs: ""
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Metrics exporter liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
## Metrics exporter pod Annotation
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9216"
## Prometheus Service Monitor
## ref: https://github.com/coreos/prometheus-operator
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
serviceMonitor:
## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
enabled: false
## Specify a namespace if needed
# namespace: monitoring
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}
## Specify Metric Relabellings to add to the scrape endpoint
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
# relabellings:
alerting:
## Define individual alerting rules as required
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup
## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
rules: {}
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}

View File

@@ -0,0 +1,603 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami MySQL image
## ref: https://hub.docker.com/r/bitnami/mysql/tags/
##
image:
debug: false
registry: quay.io
repository: bitnami/mysql
tag: "{{ version }}"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override mysql.fullname template (will maintain the release name)
## Quoted so the Jinja-rendered value is always parsed as a YAML string,
## even if it is empty or looks like a number/boolean.
##
# nameOverride:
nameOverride: "{{ sanitized_name }}"
## String to fully override mysql.fullname template
##
# fullnameOverride:
fullnameOverride: "{{ sanitized_name }}"
## Cluster domain
##
clusterDomain: cluster.local
# Labels applied to every resource the chart creates. Values are quoted:
# Kubernetes label values must be strings, and an all-digit rendered id
# would otherwise be parsed as a YAML integer and rejected at apply time.
commonLabels:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ id }}"
  databaseName: "{{ sanitized_name }}"
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 128Mi
requests:
cpu: "{{ database_total_cpus }}"
memory: "{{ database_ram_size_in_mib }}Mi"
## Use existing secret (ignores root, db and replication passwords)
##
# existingSecret:
## Admin (root) credentials
##
root:
## MySQL admin password
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run
##
password: '{{ database_password }}'
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount admin password as a file instead of using an environment variable
##
injectSecretsAsVolume: true
## Custom user/db credentials
##
db:
## MySQL username and password
## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run
## Note that this user should be different from the MySQL replication user (replication.user)
##
user: '{{ database_login }}'
password: '{{ database_password }}'
## Database to create
## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run
##
name: {{ sanitized_name }}
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount replication user password as a file instead of using an environment variable
##
injectSecretsAsVolume: true
## Replication configuration
##
replication:
## Enable replication. This enables the creation of replicas of MySQL. If false, only a
## master deployment would be created
##
enabled: false
##
## MySQL replication user
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster
## Note that this user should be different from the MySQL user (db.user)
##
user: replicator
## MySQL replication user password
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster
##
password:
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount replication user password as a file instead of using an environment variable
##
injectSecretsAsVolume: false
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
#
## ConfigMap with scripts to be run at first boot
## Note: This will override initdbScripts
# initdbScriptsConfigMap:
serviceAccount:
create: true
## Specify the name of the service account created/used
# name:
## Master nodes parameters
##
master:
## Configure MySQL with a custom my.cnf file
## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
##
config: |-
[mysqld]
default_authentication_plugin=mysql_native_password
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
plugin_dir=/opt/bitnami/mysql/plugin
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
## updateStrategy for master nodes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
extraEnvVars:
## ConfigMap with extra env vars:
##
extraEnvVarsCM:
## Secret with extra env vars:
##
extraEnvVarsSecret:
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## MySQL master pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## MySQL master containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## Example:
## containerSecurityContext:
## capabilities:
## drop: ["NET_RAW"]
## readOnlyRootFilesystem: true
##
containerSecurityContext: {}
## MySQL master containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 250m
# memory: 256Mi
requests: {}
# cpu: 250m
# memory: 256Mi
## MySQL master containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Enable persistence using PVCs on master nodes
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
mountPath: /bitnami/mysql
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
storageClass: "aws-ebs-gp2-0"
## PVC annotations
##
annotations:
ownerId: "{{ owner_id }}"
envId: "{{ environment_id }}"
databaseId: "{{ id }}"
databaseName: "{{ sanitized_name }}"
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: {{ database_disk_size_in_gib }}Gi
## Use an existing PVC
##
# existingClaim:
## Slave nodes parameters
##
slave:
## Number of slave replicas
##
replicas: 2
## Configure MySQL slave with a custom my.cnf file
## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
##
config: |-
[mysqld]
default_authentication_plugin=mysql_native_password
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
## updateStrategy for slave nodes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
extraEnvVars:
## ConfigMap with extra env vars:
##
extraEnvVarsCM:
## Secret with extra env vars:
##
extraEnvVarsSecret:
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## MySQL slave pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## MySQL slave containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## Example:
## containerSecurityContext:
## capabilities:
## drop: ["NET_RAW"]
## readOnlyRootFilesystem: true
##
containerSecurityContext: {}
## MySQL slave containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 250m
# memory: 256Mi
requests: {}
# cpu: 250m
# memory: 256Mi
## MySQL slave containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Enable persistence using PVCs on slave nodes
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
mountPath: /bitnami/mysql
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## PVC annotations
##
annotations: {}
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: {{ database_disk_size_in_gib }}Gi
## Use an existing PVC
##
# existingClaim:
## MySQL Service properties
##
service:
## MySQL Service type
##
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
name: "{{ service_name }}"
## MySQL Service port
##
port: 3306
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort:
master:
slave:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
{% if publicly_accessible -%}
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
{% endif %}
## loadBalancerIP for the MySQL Service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
# loadBalancerIP:
# master:
# slave:
## MySQL prometheus metrics parameters
## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint
##
metrics:
enabled: false
## Bitnami MySQL Prometheus exporter image
## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-10-r127
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## MySQL Prometheus exporter containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 0.5
# memory: 256Mi
requests: {}
# cpu: 0.5
# memory: 256Mi
## MySQL Prometheus exporter service parameters
##
service:
type: ClusterIP
port: 9104
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9104"
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
enabled: false
## Namespace in which Prometheus is running
##
# namespace: monitoring
## Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# interval: 10s
## Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# scrapeTimeout: 10s
## ServiceMonitor selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
##
# selector:
# prometheus: my-prometheus

View File

@@ -0,0 +1,572 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
##
controller:
name: controller
image:
repository: quay.io/kubernetes-ingress-controller/nginx-ingress-controller
tag: "0.30.0"
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true
# This will fix the issue of HPA not being able to read the metrics.
# Note that if you enable it for existing deployments, it won't work as the labels are immutable.
# We recommend setting this to true for new deployments.
useComponentLabel: true
# Configures the ports the nginx-controller listens on
containerPort:
http: 80
https: 443
# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
config:
# NOTE: "client_max_body_size" is not a valid ingress-nginx ConfigMap key and is
# ignored by the controller; "proxy-body-size" below is the supported setting
# (it maps to nginx's client_max_body_size directive).
proxy-body-size: 100m
server-tokens: "false"
# Maxmind license key to download GeoLite2 Databases
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""
# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
proxySetHeaders: {}
# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
addHeaders: {}
# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
# is merged
hostNetwork: false
# Optionally customize the pod dnsConfig.
dnsConfig: {}
# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
dnsPolicy: ClusterFirst
# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
# Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
reportNodeInternalIp: false
## Use host ports 80 and 443
daemonset:
useHostPort: false
hostPorts:
http: 80
https: 443
## Required only if defaultBackend.enabled = false
## Must be <namespace>/<service_name>
##
defaultBackendService: ""
## Election ID to use for status update
##
electionID: ingress-controller-leader-{{ id }}
## Name of the ingress class to route through this controller
##
ingressClass: "{{ id }}"
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}
## Allows customization of the external service
## the ingress will be bound to via DNS
publishService:
enabled: true
## Allows overriding of the publish service to bind to
## Must be <namespace>/<service_name>
##
pathOverride: ""
## Limit the scope of the controller
##
scope:
enabled: true
namespace: "{{ namespace }}" # defaults to .Release.Namespace
## Allows customization of the configmap / nginx-configmap namespace
##
configMapNamespace: "" # defaults to .Release.Namespace
## Allows customization of the tcp-services-configmap namespace
##
tcp:
configMapNamespace: "" # defaults to .Release.Namespace
## Allows customization of the udp-services-configmap namespace
##
udp:
configMapNamespace: "" # defaults to .Release.Namespace
## Additional command line arguments to pass to nginx-ingress-controller
## E.g. to specify the default SSL certificate you can use
## extraArgs:
## default-ssl-certificate: "<namespace>/<secret_name>"
extraArgs: {}
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## DaemonSet or Deployment
##
kind: Deployment
## Annotations to be added to the controller deployment
##
deploymentAnnotations: {}
# The update strategy to apply to the Deployment or DaemonSet
##
updateStrategy:
rollingUpdate:
maxUnavailable: 1
# minReadySeconds to avoid killing pods before we are ready
##
minReadySeconds: 0
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## Affinity and anti-affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - nginx-ingress
# topologyKey: kubernetes.io/hostname
# # An example of required pod anti-affinity
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - nginx-ingress
# topologyKey: "kubernetes.io/hostname"
## terminationGracePeriodSeconds
##
terminationGracePeriodSeconds: 60
## Node labels for controller pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
## Annotations to be added to controller pods
##
podAnnotations: {}
replicaCount: {{ nginx_minimum_replicas }}
minAvailable: 1
resources:
limits:
cpu: {{ nginx_limit_cpu }}
memory: {{ nginx_limit_memory }}
requests:
cpu: {{ nginx_requests_cpu }}
memory: {{ nginx_requests_memory }}
autoscaling:
enabled: {{ nginx_enable_horizontal_autoscaler }}
minReplicas: {{ nginx_minimum_replicas }}
maxReplicas: {{ nginx_maximum_replicas }}
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
## Override NGINX template
customTemplate:
configMapName: ""
configMapKey: ""
service:
enabled: true
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
labels:
app_id: "{{ id }}"
## Deprecated, instead simply do not provide a clusterIP value
omitClusterIP: false
# clusterIP: ""
## List of IP addresses at which the controller services are available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
enableHttp: true
enableHttps: true
## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
externalTrafficPolicy: "Local"
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
sessionAffinity: ""
healthCheckNodePort: 0
ports:
http: 80
https: 443
targetPorts:
http: http
https: https
type: LoadBalancer
# type: NodePort
# nodePorts:
# http: 32080
# https: 32443
# tcp:
# 8080: 32808
nodePorts:
http: ""
https: ""
tcp: {}
udp: {}
extraContainers: []
## Additional containers to be added to the controller pod.
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
# - name: my-sidecar
# image: nginx:latest
# - name: lemonldap-ng-controller
# image: lemonldapng/lemonldap-ng-controller:0.2.0
# args:
# - /lemonldap-ng-controller
# - --alsologtostderr
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# volumeMounts:
# - name: copy-portal-skins
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
extraVolumeMounts: []
## Additional volumeMounts to the controller main container.
# - name: copy-portal-skins
# mountPath: /var/lib/lemonldap-ng/portal/skins
extraVolumes: []
## Additional volumes to the controller pod.
# - name: copy-portal-skins
# emptyDir: {}
extraInitContainers: []
## Containers, which are run before the app containers are started.
# - name: init-myservice
# image: busybox
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
admissionWebhooks:
enabled: false
failurePolicy: Fail
port: 8443
service:
annotations: {}
## Deprecated, instead simply do not provide a clusterIP value
omitClusterIP: false
# clusterIP: ""
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 443
type: ClusterIP
patch:
enabled: true
image:
repository: jettech/kube-webhook-certgen
tag: v1.0.0
pullPolicy: IfNotPresent
## Provide a priority class name to the webhook patching job
##
priorityClassName: ""
podAnnotations: {}
nodeSelector: {}
metrics:
port: 10254
# if this port is changed, change healthz-port: in extraArgs: accordingly
enabled: false
service:
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "10254"
## Deprecated, instead simply do not provide a clusterIP value
omitClusterIP: false
# clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 9913
type: ClusterIP
serviceMonitor:
enabled: false
additionalLabels: {}
namespace: ""
namespaceSelector: {}
# Default: scrape .Release.Namespace only
# To scrape all, use the following:
# namespaceSelector:
# any: true
scrapeInterval: 30s
# honorLabels: true
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
# # These are just examples rules, please adapt them to your needs
# - alert: TooMany500s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: critical
# annotations:
# description: Too many 5XXs
# summary: More than 5% of the all requests did return 5XX, this require your attention
# - alert: TooMany400s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: critical
# annotations:
# description: Too many 4XXs
# summary: More than 5% of the all requests did return 4XX, this require your attention
lifecycle: {}
priorityClassName: ""
## Rollback limit
##
revisionHistoryLimit: 10
## Default 404 backend
##
defaultBackend:
## If false, controller.defaultBackendService must be provided
##
enabled: true
name: default-backend
image:
repository: k8s.gcr.io/defaultbackend-amd64
tag: "1.5"
pullPolicy: IfNotPresent
# nobody user -> uid 65534
runAsUser: 65534
# This will fix the issue of HPA not being able to read the metrics.
# Note that if you enable it for existing deployments, it won't work as the labels are immutable.
# We recommend setting this to true for new deployments.
useComponentLabel: false
extraArgs: {}
serviceAccount:
create: true
name:
## Additional environment variables to set for defaultBackend pods
extraEnvs: []
port: 8080
## Readiness and liveness probes for default backend
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 0
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
affinity: {}
## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Node labels for default backend pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Annotations to be added to default backend pods
##
podAnnotations: {}
replicaCount: 1
minAvailable: 1
resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi
service:
annotations: {}
## Deprecated, instead simply do not provide a clusterIP value
omitClusterIP: false
# clusterIP: ""
## List of IP addresses at which the default backend service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 80
type: ClusterIP
priorityClassName: ""
# If provided, the value will be used as the `release` label instead of .Release.Name
releaseLabelOverride: ""
## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
create: true
scope: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
serviceAccount:
create: true
name:
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"

View File

@@ -0,0 +1,568 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
postgresql: {}
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
##
image:
registry: quay.io
repository: bitnami/postgresql
tag: "{{ version }}"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information on logs
## It turns BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
## String to partially override postgresql.fullname template (will maintain the release name)
##
nameOverride: "{{ sanitized_name }}"
## String to fully override postgresql.fullname template
##
fullnameOverride: "{{ sanitized_name }}"
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: true
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container Security Context
## Note: the chown of the data folder is done to securityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
## When runAsUser is set to special value "auto", init container will try to chown the
## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
##
securityContext:
runAsUser: 0
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
enabled: false
## Name of an already existing service account. Setting this value disables the automatic service account creation.
# name:
## Pod Security Policy
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
psp:
create: false
## Creates role for ServiceAccount
## Required for PSP
rbac:
create: true
replication:
enabled: false
user: repl_user
password: repl_password
slaveReplicas: 1
## Set synchronous commit mode: on, off, remote_apply, remote_write and local
## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
synchronousCommit: "off"
## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
## NOTE: It cannot be > slaveReplicas
numSynchronousReplicas: 0
## Replication Cluster application name. Useful for defining multiple replication policies
applicationName: my_application
## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
postgresqlPostgresPassword: '{{ database_password }}'
## PostgreSQL user (has superuser privileges if username is `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
postgresqlUsername: '{{ database_login }}'
## PostgreSQL password
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
##
postgresqlPassword: '{{ database_password }}'
## PostgreSQL password using existing secret
## existingSecret: secret
## Mount PostgreSQL secret as a file instead of passing environment variable
# usePasswordFile: false
## Create a database
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
##
postgresqlDatabase: "{{ database_db_name }}"
## PostgreSQL data dir
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
postgresqlDataDir: /bitnami/postgresql/data
## An array to add extra environment variables
## For example:
## extraEnv:
## - name: FOO
## value: "bar"
##
# extraEnv:
extraEnv: []
## Name of a ConfigMap containing extra env vars
##
# extraEnvVarsCM:
## Specify extra initdb args
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbArgs:
## Specify a custom location for the PostgreSQL transaction log
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbWalDir:
## PostgreSQL configuration
## Specify runtime configuration parameters as a dict, using camelCase, e.g.
## {"sharedBuffers": "500MB"}
## Alternatively, you can put your postgresql.conf under the files/ directory
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
# postgresqlConfiguration:
## PostgreSQL extended configuration
## As above, but _appended_ to the main configuration
## Alternatively, you can put your *.conf under the files/conf.d/ directory
## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
# postgresqlExtendedConf:
## PostgreSQL client authentication configuration
## Specify content for pg_hba.conf
## Default: do not create pg_hba.conf
## Alternatively, you can put your pg_hba.conf under the files/ directory
# pgHbaConfiguration: |-
# local all all trust
# host all all localhost trust
# host mydatabase mysuser 192.168.0.0/24 md5
## ConfigMap with PostgreSQL configuration
## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
# configurationConfigMap:
## ConfigMap with PostgreSQL extended configuration
# extendedConfConfigMap:
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
## ConfigMap with scripts to be run at first boot
## NOTE: This will override initdbScripts
# initdbScriptsConfigMap:
## Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
# initdbScriptsSecret:
## Specify the PostgreSQL username and password to execute the initdb scripts
initdbUser: postgres
initdbPassword: '{{ database_password }}'
## Optional duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
# terminationGracePeriodSeconds: 30
## LDAP configuration
##
ldap:
enabled: false
url: ""
server: ""
port: ""
prefix: ""
suffix: ""
baseDN: ""
bindDN: ""
bind_password:
search_attr: ""
search_filter: ""
scheme: ""
tls: false
## PostgreSQL service configuration
service:
## PostgreSQL service type
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
# clusterIP: None
port: 5432
name: "{{ service_name }}"
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. Evaluated as a template.
##
{% if publicly_accessible -%}
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
{% endif %}
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Load Balancer sources. Evaluated as a template.
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Start master and slave(s) pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes)
## limit `/dev/shm` to `64M` (see e.g. the
## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
## [containerd issue](https://github.com/containerd/containerd/issues/3654),
## which could be not enough if PostgreSQL uses parallel workers heavily.
##
shmVolume:
## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
## this limitation.
##
enabled: true
  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
  ## This option is ignored if `volumePermissions.enabled` is `false`
##
chmod:
enabled: true
## PostgreSQL data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
##
# existingClaim:
## The path the volume will be mounted at, useful when using different
## PostgreSQL images.
##
mountPath: /bitnami/postgresql
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
##
subPath: ""
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ database_disk_size_in_gib }}Gi
annotations:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ sanitized_name }}
## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
##
## PostgreSQL Master parameters
##
master:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
annotations: {}
podLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
podAnnotations: {}
priorityClassName: ""
## Extra init containers
## Example
##
## extraInitContainers:
## - name: do-something
## image: busybox
## command: ['do', 'something']
extraInitContainers: []
## Additional PostgreSQL Master Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Master Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Override the service configuration for master
##
service: {}
# type:
# nodePort:
# clusterIP:
##
## PostgreSQL Slave parameters
##
slave:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Slave Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Slave Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Override the service configuration for slave
##
service: {}
# type:
# nodePort:
# clusterIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: "{{ database_ram_size_in_mib }}Mi"
cpu: "{{ database_total_cpus }}"
## Add annotations to all the deployed resources
##
commonAnnotations: {}
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port PostgreSQL is listening
## on. When true, PostgreSQL will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
## and that match other criteria, the ones that have the good label, can reach the DB.
## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
##
## Example:
## explicitNamespacesSelector:
## matchLabels:
## role: frontend
## matchExpressions:
## - {key: role, operator: In, values: [frontend]}
explicitNamespacesSelector: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure metrics exporter
##
metrics:
enabled: false
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
loadBalancerIP:
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.8.0-debian-10-r116
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Define additional custom metrics
## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
# customMetrics:
# pg_database:
# query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
# metrics:
# - name:
# usage: "LABEL"
# description: "Name of the database"
# - size_bytes:
# usage: "GAUGE"
# description: "Size of the database in bytes"
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
## Configure extra options for liveness and readiness probes
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1

View File

@@ -0,0 +1,788 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
redis: {}
## Bitnami Redis image version
## ref: https://hub.docker.com/r/bitnami/redis/tags/
##
image:
registry: quay.io
repository: bitnami/redis
## Bitnami Redis image tag
## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links
##
tag: "{{ version }}"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override redis.fullname template (will maintain the release name)
##
nameOverride: {{ sanitized_name }}
## String to fully override redis.fullname template
##
fullnameOverride: {{ sanitized_name }}
## Cluster settings
cluster:
enabled: false
slaveCount: 3
## Use redis sentinel in the redis pod. This will disable the master and slave services and
## create one redis service with ports to the sentinel and the redis instances
sentinel:
enabled: false
## Require password authentication on the sentinel itself
## ref: https://redis.io/topics/sentinel
usePassword: true
  ## Bitnami Redis Sentinel image version
## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
##
image:
registry: docker.io
repository: bitnami/redis-sentinel
## Bitnami Redis image tag
## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
##
    tag: "{{ version }}"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
masterSet: mymaster
initialCheckTimeout: 5
quorum: 2
downAfterMilliseconds: 60000
failoverTimeout: 18000
parallelSyncs: 1
port: 26379
## Additional Redis configuration for the sentinel nodes
## ref: https://redis.io/topics/config
##
configmap:
## Enable or disable static sentinel IDs for each replicas
## If disabled each sentinel will generate a random id at startup
## If enabled, each replicas will have a constant ID on each start-up
##
staticID: false
## Configure extra options for Redis Sentinel liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
customLivenessProbe: {}
customReadinessProbe: {}
## Redis Sentinel resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis Sentinel Service properties
service:
## Redis Sentinel Service type
type: ClusterIP
sentinelPort: 26379
redisPort: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# sentinelNodePort:
# redisNodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
loadBalancerIP:
## Specifies the Kubernetes Cluster's Domain Name.
##
clusterDomain: cluster.local
networkPolicy:
## Specifies whether a NetworkPolicy should be created
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port Redis is listening
## on. When true, Redis will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional).
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
rbac:
## Specifies whether RBAC resources should be created
##
create: true
role:
## Rules to create. It follows the role specification
# rules:
# - apiGroups:
# - extensions
# resources:
# - podsecuritypolicies
# verbs:
# - use
# resourceNames:
# - gce.unprivileged
rules: []
## Redis pod Security Context
securityContext:
enabled: true
fsGroup: 1001
## sysctl settings for master and slave pods
##
## Uncomment the setting below to increase the net.core.somaxconn value
##
# sysctls:
# - name: net.core.somaxconn
# value: "10000"
## Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
containerSecurityContext:
enabled: true
runAsUser: 1001
## Use password authentication
usePassword: true
## Redis password (both master and slave)
## Defaults to a random 10-character alphanumeric string if not set and usePassword is true
## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run
##
password: '{{ database_password }}'
## Use existing secret (ignores previous password)
# existingSecret:
## Password key to be retrieved from Redis secret
##
# existingSecretPasswordKey:
## Mount secrets as files instead of environment variables
usePasswordFile: false
## Persist data to a persistent volume (Redis Master)
persistence:
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
existingClaim:
# Redis port
redisPort: 6379
##
## TLS configuration
##
tls:
# Enable TLS traffic
enabled: false
#
# Whether to require clients to authenticate or not.
authClients: true
#
# Name of the Secret that contains the certificates
certificatesSecret:
#
# Certificate filename
certFilename:
#
# Certificate Key filename
certKeyFilename:
#
# CA Certificate filename
certCAFilename:
#
# File containing DH params (in order to support DH based ciphers)
# dhParamsFilename:
##
## Redis Master parameters
##
master:
## Redis command arguments
##
## Can be used to specify command line arguments, for example:
## Note `exec` is prepended to command
##
command: "/run.sh"
## Additional commands to run prior to starting Redis
##
preExecCmds: ""
## Additional Redis configuration for the master nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis additional command line flags
##
## Can be used to specify command line flags, for example:
## extraFlags:
## - "--maxmemory-policy volatile-ttl"
## - "--repl-backlog-size 1024mb"
extraFlags: []
## Comma-separated list of Redis commands to disable
##
## Can be used to disable Redis commands for security reasons.
## Commands will be completely disabled by renaming each to an empty string.
## ref: https://redis.io/topics/security#disabling-of-specific-commands
##
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Master additional pod labels and annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ sanitized_name }}
podAnnotations: {}
## Redis Master resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
# Enable shared process namespace in a pod.
# If set to false (default), each container will run in separate namespace, redis will have PID=1.
# If set to true, the /pause will run as init process and will reap any zombie PIDs,
# for example, generated by a custom exec probe running longer than a probe timeoutSeconds.
# Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
shareProcessNamespace: false
## Configure extra options for Redis Master liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
## Configure custom probes for images other images like
## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
##
# customLivenessProbe:
# tcpSocket:
# port: 6379
# initialDelaySeconds: 10
# periodSeconds: 5
# customReadinessProbe:
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 5
# exec:
# command:
# - "container-entrypoint"
# - "bash"
# - "-c"
# - "redis-cli set liveness-probe \"`date`\" | grep OK"
customLivenessProbe: {}
customReadinessProbe: {}
## Redis Master Node selectors and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
##
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Redis Master pod/node affinity/anti-affinity
##
affinity: {}
## Redis Master Service properties
service:
## Redis Master Service type
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
port: 6379
name: {{ service_name }}
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
{% if publicly_accessible -%}
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
{% endif %}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ database_disk_size_in_gib }}Gi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Update strategy, can be set to RollingUpdate or onDelete by default.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
## Redis Master pod priorityClassName
##
priorityClassName: ''
## An array to add extra env vars
## For example:
## extraEnvVars:
## - name: name
## value: value
## - name: other_name
## valueFrom:
## fieldRef:
## fieldPath: fieldPath
##
extraEnvVars: []
## ConfigMap with extra env vars:
##
extraEnvVarsCM: []
## Secret with extra env vars:
##
extraEnvVarsSecret: []
##
## Redis Slave properties
## Note: service.type is a mandatory parameter
## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master
##
slave:
## Slave Service properties
service:
## Redis Slave Service type
type: ClusterIP
## Redis port
port: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Redis slave port
port: 6379
## Can be used to specify command line arguments, for example:
## Note `exec` is prepended to command
##
command: "/run.sh"
## Additional commands to run prior to starting Redis
##
preExecCmds: ""
## Additional Redis configuration for the slave nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis extra flags
extraFlags: []
## List of Redis commands to disable
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Slave pod/node affinity/anti-affinity
##
affinity: {}
## Kubernetes Spread Constraints for pod assignment
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
# - maxSkew: 1
# topologyKey: node
# whenUnsatisfiable: DoNotSchedule
spreadConstraints: {}
# Enable shared process namespace in a pod.
# If set to false (default), each container will run in separate namespace, redis will have PID=1.
# If set to true, the /pause will run as init process and will reap any zombie PIDs,
# for example, generated by a custom exec probe running longer than a probe timeoutSeconds.
# Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
shareProcessNamespace: false
## Configure extra options for Redis Slave liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
## Configure custom probes for images other images like
## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
##
# customLivenessProbe:
# tcpSocket:
# port: 6379
# initialDelaySeconds: 10
# periodSeconds: 5
# customReadinessProbe:
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 5
# exec:
# command:
# - "container-entrypoint"
# - "bash"
# - "-c"
# - "redis-cli set liveness-probe \"`date`\" | grep OK"
customLivenessProbe: {}
customReadinessProbe: {}
## Redis slave Resource
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis slave selectors and tolerations for pod assignment
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Redis slave pod Annotation and Labels
podLabels: {}
podAnnotations: {}
## Redis slave pod priorityClassName
# priorityClassName: ''
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Update strategy, can be set to RollingUpdate or onDelete by default.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
labels: {}
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
## An array to add extra env vars
## For example:
## extraEnvVars:
## - name: name
## value: value
## - name: other_name
## valueFrom:
## fieldRef:
## fieldPath: fieldPath
##
extraEnvVars: []
## ConfigMap with extra env vars:
##
extraEnvVarsCM: []
## Secret with extra env vars:
##
extraEnvVarsSecret: []
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.13.1-debian-10-r6
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Extra arguments for Metrics exporter, for example:
## extraArgs:
## check-keys: myKey,myOtherKey
# extraArgs: {}
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9121"
# podLabels: {}
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
## Specify a namespace if needed
# namespace: monitoring
# fallback to the prometheus default unless specified
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## Metrics exporter pod priorityClassName
# priorityClassName: ''
service:
type: ClusterIP
## Use serviceLoadBalancerIP to request a specific static IP,
## otherwise leave blank
# loadBalancerIP:
annotations: {}
labels: {}
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: true
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## Init container Security Context
## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
  ## When runAsUser is set to special value "auto", init container will try to chown the
## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
## podSecurityContext.enabled=false,containerSecurityContext.enabled=false
##
securityContext:
runAsUser: 0
## Redis config file
## ref: https://redis.io/topics/config
##
configmap: |-
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
## Sysctl InitContainer
## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings)
sysctlImage:
enabled: false
command: []
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
mountHostSys: false
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## PodSecurityPolicy configuration
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
## Specifies whether a PodSecurityPolicy should be created
##
create: false
## Define a disruption budget
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
##
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,7 @@
apiVersion: v2
name: qovery
description: A Qovery Helm chart for Kubernetes deployments
type: application
version: 0.2.0
# Quoted so a numeric-looking rendered value (e.g. 1.0) stays a YAML string;
# Helm requires appVersion to be a string.
appVersion: "{{ helm_app_version }}"
icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png

View File

@@ -0,0 +1,92 @@
{%- if not is_storage %}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
annotations:
releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %}
spec:
replicas: {{ min_instances }}
strategy:
type: RollingUpdate
{% if max_instances == 1 %}
rollingUpdate:
maxSurge: 1
{% endif %}
selector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
template:
metadata:
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
annotations:
checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %}
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- {{ sanitized_name }}
topologyKey: "kubernetes.io/hostname"
automountServiceAccountToken: false
terminationGracePeriodSeconds: 60
securityContext: {}
{%- if is_registry_secret %}
imagePullSecrets:
- name: {{ registry_secret }}
{%- endif %}
containers:
- name: {{ sanitized_name }}
image: "{{ image_name_with_tag }}"
env:
{%- for ev in environment_variables %}
- name: "{{ ev.key }}"
valueFrom:
secretKeyRef:
name: {{ sanitized_name }}
key: {{ ev.key }}
{%- endfor %}
{%- if private_port %}
ports:
{%- for port in ports %}
- containerPort: {{ port.port }}
name: "p{{ port.port }}"
protocol: TCP
{%- endfor %}
readinessProbe:
tcpSocket:
port: {{ private_port }}
initialDelaySeconds: {{ start_timeout_in_seconds }}
periodSeconds: 10
livenessProbe:
tcpSocket:
port: {{ private_port }}
initialDelaySeconds: {{ start_timeout_in_seconds }}
periodSeconds: 20
{%- endif %}
resources:
limits:
cpu: {{ cpu_burst }}
memory: {{ total_ram_in_mib }}Mi
requests:
cpu: {{ total_cpus }}
memory: {{ total_ram_in_mib }}Mi
{%- endif %}

View File

@@ -0,0 +1,19 @@
{%- if not is_storage and min_instances != max_instances %}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
envId: {{ environment_id }}
appId: {{ id }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ sanitized_name }}
minReplicas: {{ min_instances }}
maxReplicas: {{ max_instances }}
targetCPUUtilizationPercentage: 60
{%- endif %}

View File

@@ -0,0 +1,95 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ sanitized_name }}-default
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
# Deny all ingress by default to this application
podSelector:
matchLabels:
appId: {{ id }}
app: {{ sanitized_name }}
ownerId: {{ owner_id }}
envId: {{ environment_id }}
policyTypes:
- Ingress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ sanitized_name }}-app-access
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
# Then allow some ingress to this application
podSelector:
matchLabels:
appId: {{ id }}
app: {{ sanitized_name }}
ownerId: {{ owner_id }}
envId: {{ environment_id }}
ingress:
# Allow ingress from same environment
- from:
- podSelector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
# Allow ingress from everywhere but only to application port
{% if is_private_port %}
- ports:
- port: {{ private_port }}
{% endif %}
# FIXME(sileht): Previous rule is not perfect as other pods/namespaces can
# access to the application port without going through the Ingress object,
# but that's not critical neither
# Only way to fix that is to allow lb and kube-proxy to access the namespace/pods explictly via IP, eg:
# - from:
# - ipBlock:
# cidr: 10.0.99.179/32
# - ipBlock:
# cidr: 10.0.28.216/32
# - ipBlock:
# cidr: 10.0.98.42/32
# - ipBlock:
# cidr: 10.0.59.208/32
# Since user pods, kube-proxy, and lbs are all in 10.0.0.0/8 we can't write generic rule like:
# - ipBlock:
# cidr: 0.0.0.0/0
# except: [10.0.0.0/8]
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ sanitized_name }}-deny-aws-metadata-server
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
podSelector:
matchLabels:
appId: {{ id }}
app: {{ sanitized_name }}
ownerId: {{ owner_id }}
envId: {{ environment_id }}
egress:
- to:
- ipBlock:
cidr: 0.0.0.0/0
except:
- 169.254.169.254/32

View File

@@ -0,0 +1,21 @@
{%- if not is_storage %}
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
spec:
maxUnavailable: 10%
selector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
{%- endif %}

View File

@@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Secret
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
type: Opaque
data:
{%- for ev in environment_variables %}
{{ ev.key }}: |-
{{ ev.value }}
{%- endfor %}

View File

@@ -0,0 +1,26 @@
{%- if (ports is defined) and ports %}
apiVersion: v1
kind: Service
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
spec:
type: ClusterIP
ports:
{%- for port in ports %}
- protocol: TCP
name: "p{{ port.port }}"
port: {{ port.port }}
targetPort: {{ port.port }}
{%- endfor %}
selector:
ownerId: {{ owner_id }}
appId: {{ id }}
app: {{ sanitized_name }}
envId: {{ environment_id }}
{%- endif %}

View File

@@ -0,0 +1,132 @@
{%- if is_storage %}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
annotations:
releaseTime: {% raw %}{{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }}{% endraw %}
spec:
replicas: {{ min_instances }}
serviceName: {{ sanitized_name }}
selector:
matchLabels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
template:
metadata:
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
annotations:
checksum/config: {% raw %}{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{% endraw %}
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- {{ sanitized_name }}
topologyKey: "kubernetes.io/hostname"
automountServiceAccountToken: false
terminationGracePeriodSeconds: 60
securityContext: {}
{%- if is_registry_secret %}
imagePullSecrets:
- name: {{ registry_secret }}
{%- endif %}
containers:
- name: {{ sanitized_name }}
image: "{{ image_name_with_tag }}"
env:
{%- for ev in environment_variables %}
- name: "{{ ev.key }}"
valueFrom:
secretKeyRef:
name: {{ sanitized_name }}
key: {{ ev.key }}
{%- endfor %}
{%- if private_port %}
ports:
{%- for port in ports %}
- containerPort: {{ port.port }}
name: "p{{ port.port }}"
protocol: TCP
{%- endfor %}
readinessProbe:
tcpSocket:
port: {{ private_port }}
initialDelaySeconds: {{ start_timeout_in_seconds }}
periodSeconds: 10
livenessProbe:
tcpSocket:
port: {{ private_port }}
initialDelaySeconds: {{ start_timeout_in_seconds }}
periodSeconds: 20
{%- endif %}
resources:
limits:
cpu: {{ cpu_burst }}
memory: {{ total_ram_in_mib }}Mi
requests:
cpu: {{ total_cpus }}
memory: {{ total_ram_in_mib }}Mi
volumeMounts:
{%- for s in storage %}
- name: {{ s.id }}
mountPath: {{ s.mount_point }}
{%- endfor %}
volumeClaimTemplates:
{%- for s in storage %}
{% if clone %}
- metadata:
name: {{ s.id }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
app: {{ sanitized_name }}
diskId: {{ s.id }}
diskType: {{ s.storage_type }}
spec:
accessModes:
- ReadWriteOnce
storageClassName: aws-ebs-{{ s.storage_type }}-0
dataSource:
name: {{ s.id }}
kind: PersistentVolumeClaim
resources:
requests:
{#- fix: the storage loop variable is "s" (see the for-loop above); "disk" is undefined here #}
storage: {{ s.size_in_gib }}Gi
{% else %}
- metadata:
name: {{ s.id }}
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
appId: {{ id }}
diskId: {{ s.id }}
diskType: {{ s.storage_type }}
spec:
accessModes:
- ReadWriteOnce
storageClassName: aws-ebs-{{ s.storage_type }}-0
resources:
requests:
storage: {{ s.size_in_gib }}Gi
{%- endif %}
{%- endfor %}
{%- endif %}

View File

@@ -0,0 +1,2 @@
# Don't add anything here
# Jinja2 is taken on behalf of Go template

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: qovery
description: A Qovery Helm chart for Kubernetes deployments
type: application
version: 0.2.0
icon: https://uploads-ssl.webflow.com/5de176bfd41c9b0a91bbb0a4/5de17c383719a1490cdb4b82_qovery%20logo-svg%202.png

View File

@@ -0,0 +1,20 @@
{%- if custom_domains|length > 0 %}
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: {{ id }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
spec:
acme:
server: {{ spec_acme_server }}
email: {{ spec_acme_email }}
privateKeySecretRef:
name: acme-{{ id }}-key
solvers:
- http01:
ingress:
class: nginx-qovery
{%- endif %}

View File

@@ -0,0 +1,69 @@
{%- if routes|length >= 1 %}
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ sanitized_name }}
namespace: {{ namespace }}
labels:
ownerId: {{ owner_id }}
routerName: {{ sanitized_name }}
routerId: {{ id }}
envId: {{ environment_id }}
fqdn: "{{ router_default_domain }}"
annotations:
external-dns.alpha.kubernetes.io/hostname: {{ router_default_domain }}
external-dns.alpha.kubernetes.io/ttl: "300"
kubernetes.io/tls-acme: "true"
{%- if custom_domains|length > 0 %}
cert-manager.io/issuer: {{ id }}
{%- else %}
cert-manager.io/cluster-issuer: {{ metadata_annotations_cert_manager_cluster_issuer }}
{%- endif %}
kubernetes.io/ingress.class: "nginx-qovery"
ingress.kubernetes.io/ssl-redirect: "true"
#nginx.ingress.kubernetes.io/enable-cors: "true"
#nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization,x-csrftoken"
{%- if sticky_sessions_enabled == true %}
# https://kubernetes.github.io/ingress-nginx/examples/affinity/cookie/
nginx.ingress.kubernetes.io/affinity: "cookie"
nginx.ingress.kubernetes.io/affinity-mode: "persistent"
nginx.ingress.kubernetes.io/session-cookie-secure: "true"
nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE_QOVERY"
nginx.ingress.kubernetes.io/session-cookie-max-age: "85400" # 1 day
nginx.ingress.kubernetes.io/session-cookie-expires: "85400" # 1 day
nginx.ingress.kubernetes.io/session-cookie-samesite: "Lax"
{%- endif %}
spec:
tls:
{%- if custom_domains|length > 0 %}
- secretName: "router-tls-{{ id }}"
hosts:
{%- for domain in custom_domains %}
- "{{ domain.domain }}"
{%- endfor %}
{%- endif %}
# We dont use secret name as we want to rely on default tls certificate from ingress controller
# which has our wildcard certificate https://cert-manager.io/next-docs/faq/kubed/
rules:
- host: "{{ router_default_domain }}"
http:
paths:
{%- for route in routes %}
- path: "{{ route.path }}"
backend:
serviceName: "{{ route.application_name }}"
servicePort: {{ route.application_port }}
{%- endfor %}
{%- for domain in custom_domains %}
- host: "{{ domain.domain }}"
http:
paths:
{%- for route in routes %}
- path: "{{ route.path }}"
backend:
serviceName: "{{ route.application_name }}"
servicePort: {{ route.application_port }}
{%- endfor %}
{%- endfor %}
{%- endif %}

View File

@@ -0,0 +1,2 @@
# Don't add anything here (git hash-object -t tree /dev/null)
# Jinja2 is taken on behalf of Go template

View File

@@ -0,0 +1,21 @@
terraform {
backend "kubernetes" {
secret_suffix = "{{ tfstate_suffix_name }}"
load_config_file = true
config_path = "{{ kubeconfig_path }}"
namespace = "{{ namespace }}"
exec {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws-iam-authenticator"
args = [
"token",
"-i",
"qovery-{{kubernetes_cluster_id}}"]
env = {
AWS_ACCESS_KEY_ID = "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
AWS_DEFAULT_REGION = "{{ region }}"
}
}
}
}

View File

@@ -0,0 +1,167 @@
# Qovery
variable "cluster_name" {
description = "Kubernetes cluster name"
default = "{{ cluster_name }}"
type = string
}
variable "region" {
description = "AWS region to store terraform state and lock"
default = "{{ region }}"
type = string
}
variable "kubernetes_cluster_id" {
description = "Kubernetes cluster name with region"
default = "{{ kubernetes_cluster_id }}"
type = string
}
variable "region_cluster_name" {
description = "AWS region to store terraform state and lock"
default = "{{ region }}-{{ cluster_name }}"
type = string
}
variable "q_project_id" {
description = "Qovery project ID"
default = "{{ project_id }}"
type = string
}
variable "q_customer_id" {
description = "Qovery customer ID"
default = "{{ owner_id }}"
type = string
}
variable "q_environment_id" {
description = "Qovery client environment"
default = "{{ environment_id }}"
type = string
}
# Tags applied to every Qovery-managed database resource. Optionally carries a
# Pleco TTL and the snapshot identifier a database was last restored from.
variable "database_tags" {
  description = "Qovery database tags"
  default = {
    "cluster_name" = "{{ cluster_name }}"
    "cluster_id" = "{{ kubernetes_cluster_id }}"
    "region" = "{{ region }}"
    "q_client_id" = "{{ owner_id }}"
    "q_environment_id" = "{{ environment_id }}"
    "q_project_id" = "{{ project_id }}"
    {% if resource_expiration_in_seconds is defined %}
    "ttl" = "{{ resource_expiration_in_seconds }}"
    {% endif %}
    {% if snapshot is defined and snapshot["snapshot_id"] %}
    {#- fix: "{ { ... } }" (with inner spaces) is not a Jinja2 delimiter, so the
        placeholder was emitted literally into the rendered HCL; quote the value
        so the rendered file is valid Terraform. #}
    "meta_last_restored_from" = "{{ snapshot['snapshot_id'] }}"
    {% endif %}
  }
  # bare "map" is deprecated since Terraform 0.12; all values above are strings
  type = map(string)
}
{%- if resource_expiration_in_seconds is defined %}
# Pleco ttl
variable "resource_expiration_in_seconds" {
description = "Resource expiration in seconds"
default = {{resource_expiration_in_seconds}}
type = number
}
{% endif %}
{%- if snapshot is defined %}
# Snapshots
variable "snapshot_identifier" {
description = "Snapshot ID to restore"
default = "{{ snapshot['snapshot_id']}}"
type = string
}
{% endif %}
# Network
variable "publicly_accessible" {
description = "Instance publicly accessible"
default = {{ publicly_accessible }}
type = bool
}
variable "multi_az" {
description = "Multi availability zones"
default = true
type = bool
}
# Upgrades
variable "auto_minor_version_upgrade" {
description = "Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window"
default = true
type = bool
}
variable "apply_changes_now" {
description = "Apply changes now or during the during the maintenance window"
default = false
type = bool
}
variable "preferred_maintenance_window" {
description = "Maintenance window"
default = "Tue:02:00-Tue:04:00"
type = string
}
# Monitoring
variable "performance_insights_enabled" {
description = "Specifies whether Performance Insights are enabled"
default = true
type = bool
}
variable "performance_insights_enabled_retention" {
description = "The amount of time in days to retain Performance Insights data"
default = 7
type = number
}
# Backups
variable "backup_retention_period" {
description = "Backup retention period"
default = 14
type = number
}
variable "preferred_backup_window" {
description = "Maintenance window"
default = "00:00-01:00"
type = string
}
variable "delete_automated_backups" {
description = "Delete automated backups"
default = {{delete_automated_backups}}
type = bool
}
variable "skip_final_snapshot" {
description = "Skip final snapshot"
default = {{ skip_final_snapshot }}
type = bool
}
variable "final_snapshot_name" {
description = "Name of the final snapshot before the database goes deleted"
default = "{{ final_snapshot_name }}"
type = string
}
{#- Removed: duplicate "snapshot_identifier" variable declaration. The same
    variable is already declared earlier in this file under the same
    "snapshot is defined" guard, and Terraform errors on duplicate variable
    declarations within a module. This Jinja comment renders to nothing. #}

View File

@@ -0,0 +1,52 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.36.0"
}
helm = {
source = "hashicorp/helm"
version = "~> 1.3.2"
}
local = {
source = "hashicorp/local"
version = "~> 1.4"
}
time = {
source = "hashicorp/time"
version = "~> 0.3"
}
}
required_version = ">= 0.14"
}
provider "aws" {
profile = "default"
region = "{{ region }}"
access_key = "{{ aws_access_key }}"
secret_key = "{{ aws_secret_key }}"
}
# Fixed: the attributes referenced below (endpoint, certificate_authority)
# belong to the "aws_eks_cluster" data source, not "aws_instance" (which has
# no "name" argument either) — the original would fail `terraform validate`.
data "aws_eks_cluster" "eks_cluster" {
  name = "qovery-{{kubernetes_cluster_id}}"
}

# Helm provider authenticated against the EKS cluster endpoint via
# aws-iam-authenticator, using the same templated AWS credentials as the
# aws provider above.
provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.eks_cluster.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data)
    load_config_file       = false
    exec {
      api_version = "client.authentication.k8s.io/v1alpha1"
      command     = "aws-iam-authenticator"
      args        = ["token", "-i", "qovery-{{kubernetes_cluster_id}}"]
      env = {
        AWS_ACCESS_KEY_ID     = "{{ aws_access_key }}"
        AWS_SECRET_ACCESS_KEY = "{{ aws_secret_key }}"
        AWS_DEFAULT_REGION    = "{{ region }}"
      }
    }
  }
}
resource "time_static" "on_db_create" {}

View File

@@ -0,0 +1,6 @@
locals {
mongodb_database_tags = merge (var.database_tags, {
database_identifier = var.documentdb_identifier
creationDate = time_static.on_db_create.rfc3339
})
}

View File

@@ -0,0 +1,114 @@
data "aws_vpc" "selected" {
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
}
data "aws_subnet_ids" "k8s_subnet_ids" {
vpc_id = data.aws_vpc.selected.id
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
filter {
name = "tag:Service"
values = ["DocumentDB"]
}
}
data "aws_security_group" "selected" {
filter {
name = "tag:Name"
values = ["qovery-eks-workers"]
}
filter {
name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}"
values = ["owned"]
}
}
resource "helm_release" "documentdb_instance_external_name" {
name = "${aws_docdb_cluster.documentdb_cluster.id}-externalname"
chart = "external-name-svc"
namespace = "{{namespace}}"
atomic = true
max_history = 50
set {
name = "target_hostname"
value = aws_docdb_cluster.documentdb_cluster.endpoint
}
set {
name = "source_fqdn"
value = "{{database_fqdn}}"
}
set {
name = "app_id"
value = "{{database_id}}"
}
set {
name = "service_name"
value = "{{service_name}}"
}
depends_on = [
aws_docdb_cluster.documentdb_cluster
]
}
resource "aws_docdb_cluster_instance" "documentdb_cluster_instances" {
count = var.documentdb_instances_number
cluster_identifier = aws_docdb_cluster.documentdb_cluster.id
identifier = "${var.documentdb_identifier}-${count.index}"
instance_class = var.instance_class
# Maintenance and upgrade
auto_minor_version_upgrade = var.auto_minor_version_upgrade
preferred_maintenance_window = var.preferred_maintenance_window
tags = local.mongodb_database_tags
}
resource "aws_docdb_cluster" "documentdb_cluster" {
cluster_identifier = var.documentdb_identifier
tags = local.mongodb_database_tags
# DocumentDB instance basics
port = var.port
timeouts {
create = "60m"
update = "120m"
delete = "60m"
}
master_password = var.password
{%- if snapshot is defined and snapshot["snapshot_id"] %}
# Snapshot
snapshot_identifier = var.snapshot_identifier
{%- else %}
master_username = var.username
engine = "docdb"
{%- endif %}
storage_encrypted = var.encrypt_disk
# Network
db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id
vpc_security_group_ids = data.aws_security_group.selected.*.id
# Maintenance and upgrades
apply_immediately = var.apply_changes_now
# Backups
backup_retention_period = var.backup_retention_period
preferred_backup_window = var.preferred_backup_window
skip_final_snapshot = var.skip_final_snapshot
{%- if not skip_final_snapshot %}
final_snapshot_identifier = var.final_snapshot_name
{%- endif %}
}

View File

@@ -0,0 +1,43 @@
# documentdb instance basics
variable "documentdb_identifier" {
description = "Documentdb cluster name (Cluster identifier)"
default = "{{ fqdn_id }}"
type = string
}
variable "documentdb_instances_number" {
description = "DocumentDB instance numbers"
default = 1
type = number
}
variable "port" {
description = "Documentdb instance port"
default = {{ database_port }}
type = number
}
variable "instance_class" {
description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html"
default = "{{database_instance_type}}"
type = string
}
variable "username" {
description = "Admin username for the master DB user"
default = "{{ database_login }}"
type = string
}
variable "password" {
description = "Admin password for the master DB user"
default = "{{ database_password }}"
type = string
}
variable "encrypt_disk" {
description = "Enable disk encryption"
default = "{{ encrypt_disk }}"
type = string
}

View File

@@ -0,0 +1,6 @@
locals {
mysql_database_tags = merge (var.database_tags, {
database_identifier = var.mysql_identifier
creationDate = time_static.on_db_create.rfc3339
})
}

View File

@@ -0,0 +1,132 @@
data "aws_vpc" "selected" {
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
}
data "aws_subnet_ids" "k8s_subnet_ids" {
vpc_id = data.aws_vpc.selected.id
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
filter {
name = "tag:Service"
values = ["RDS"]
}
}
data "aws_security_group" "selected" {
filter {
name = "tag:Name"
values = ["qovery-eks-workers"]
}
filter {
name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}"
values = ["owned"]
}
}
data "aws_iam_role" "rds_enhanced_monitoring" {
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}"
}
resource "helm_release" "mysql_instance_external_name" {
name = "${aws_db_instance.mysql_instance.id}-externalname"
chart = "external-name-svc"
namespace = "{{namespace}}"
atomic = true
max_history = 50
set {
name = "target_hostname"
value = aws_db_instance.mysql_instance.address
}
set {
name = "source_fqdn"
value = "{{database_fqdn}}"
}
set {
name = "app_id"
value = "{{database_id}}"
}
set {
name = "service_name"
value = "{{service_name}}"
}
depends_on = [
aws_db_instance.mysql_instance
]
}
resource "aws_db_parameter_group" "mysql_parameter_group" {
name = "qovery-${var.mysql_identifier}"
family = var.parameter_group_family
tags = local.mysql_database_tags
# Set superuser permission to the default 'username' account
parameter {
name = "log_bin_trust_function_creators"
value = "1"
}
}
# Non snapshoted version
resource "aws_db_instance" "mysql_instance" {
identifier = var.mysql_identifier
tags = local.mysql_database_tags
# MySQL instance basics
instance_class = var.instance_class
port = var.port
timeouts {
create = "60m"
update = "120m"
delete = "60m"
}
password = var.password
name = var.database_name
parameter_group_name = aws_db_parameter_group.mysql_parameter_group.name
storage_encrypted = var.encrypt_disk
{%- if snapshot is defined and snapshot["snapshot_id"] %}
# Snapshot
snapshot_identifier = var.snapshot_identifier
{%- else %}
allocated_storage = var.disk_size
storage_type = var.storage_type
username = var.username
engine_version = var.mysql_version
engine = "mysql"
ca_cert_identifier = "rds-ca-2019"
{%- endif %}
# Network
db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id
vpc_security_group_ids = data.aws_security_group.selected.*.id
publicly_accessible = var.publicly_accessible
multi_az = var.multi_az
# Maintenance and upgrades
apply_immediately = var.apply_changes_now
auto_minor_version_upgrade = var.auto_minor_version_upgrade
maintenance_window = var.preferred_maintenance_window
# Monitoring
monitoring_interval = 10
monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn
# Backups
backup_retention_period = var.backup_retention_period
backup_window = var.preferred_backup_window
skip_final_snapshot = var.skip_final_snapshot
{%- if not skip_final_snapshot %}
final_snapshot_identifier = var.final_snapshot_name
{%- endif %}
copy_tags_to_snapshot = true
delete_automated_backups = var.delete_automated_backups
}

View File

@@ -0,0 +1,67 @@
# MySQL instance basics
variable "mysql_identifier" {
description = "MySQL instance name (DB identifier)"
default = "{{ fqdn_id }}"
type = string
}
variable "port" {
description = "MySQL instance port"
default = {{ database_port }}
type = number
}
variable "disk_size" {
description = "disk instance size"
default = {{ database_disk_size_in_gib }}
type = number
}
variable "mysql_version" {
description = "MySQL version"
default = "{{ version }}"
type = string
}
variable "parameter_group_family" {
description = "RDS parameter group family"
default = "{{ parameter_group_family }}"
type = string
}
variable "storage_type" {
description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)."
default = "{{ database_disk_type }}"
type = string
}
variable "encrypt_disk" {
description = "Enable disk encryption"
default = "{{ encrypt_disk }}"
type = string
}
variable "instance_class" {
description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html"
default = "{{database_instance_type}}"
type = string
}
variable "username" {
description = "Admin username for the master DB user"
default = "{{ database_login }}"
type = string
}
variable "password" {
description = "Admin password for the master DB user"
default = "{{ database_password }}"
type = string
}
variable "database_name" {
description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance"
default = "{{ database_name }}"
type = string
}

View File

@@ -0,0 +1,6 @@
locals {
postgres_database_tags = merge (var.database_tags, {
database_identifier = var.postgresql_identifier
creationDate = time_static.on_db_create.rfc3339
})
}

View File

@@ -0,0 +1,121 @@
data "aws_vpc" "selected" {
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
}
data "aws_subnet_ids" "k8s_subnet_ids" {
vpc_id = data.aws_vpc.selected.id
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
filter {
name = "tag:Service"
values = ["RDS"]
}
}
data "aws_security_group" "selected" {
filter {
name = "tag:Name"
values = ["qovery-eks-workers"]
}
filter {
name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}"
values = ["owned"]
}
}
data "aws_iam_role" "rds_enhanced_monitoring" {
name = "qovery-rds-enhanced-monitoring-${var.kubernetes_cluster_id}"
}
resource "helm_release" "postgres_instance_external_name" {
name = "${aws_db_instance.postgresql_instance.id}-externalname"
chart = "external-name-svc"
namespace = "{{namespace}}"
atomic = true
max_history = 50
set {
name = "target_hostname"
value = aws_db_instance.postgresql_instance.address
}
set {
name = "source_fqdn"
value = "{{database_fqdn}}"
}
set {
name = "app_id"
value = "{{database_id}}"
}
set {
name = "service_name"
value = "{{service_name}}"
}
depends_on = [
aws_db_instance.postgresql_instance
]
}
# Non snapshoted version
resource "aws_db_instance" "postgresql_instance" {
identifier = var.postgresql_identifier
tags = local.postgres_database_tags
# Postgres instance basics
instance_class = var.instance_class
port = var.port
timeouts {
create = "60m"
update = "120m"
delete = "60m"
}
password = var.password
storage_encrypted = var.encrypt_disk
{%- if snapshot and snapshot["snapshot_id"] %}
# Snapshot
snapshot_identifier = var.snapshot_identifier
{%- else %}
allocated_storage = var.disk_size
name = var.database_name
storage_type = var.storage_type
username = var.username
engine_version = var.postgresql_version
engine = "postgres"
ca_cert_identifier = "rds-ca-2019"
{%- endif %}
# Network
db_subnet_group_name = data.aws_subnet_ids.k8s_subnet_ids.id
vpc_security_group_ids = data.aws_security_group.selected.*.id
publicly_accessible = var.publicly_accessible
multi_az = var.multi_az
# Maintenance and upgrades
apply_immediately = var.apply_changes_now
auto_minor_version_upgrade = var.auto_minor_version_upgrade
maintenance_window = var.preferred_maintenance_window
# Monitoring
performance_insights_enabled = var.performance_insights_enabled
performance_insights_retention_period = var.performance_insights_enabled_retention
monitoring_interval = 10
monitoring_role_arn = data.aws_iam_role.rds_enhanced_monitoring.arn
# Backups
backup_retention_period = var.backup_retention_period
backup_window = var.preferred_backup_window
skip_final_snapshot = var.skip_final_snapshot
{%- if not skip_final_snapshot %}
final_snapshot_identifier = var.final_snapshot_name
{%- endif %}
copy_tags_to_snapshot = true
delete_automated_backups = var.delete_automated_backups
}

View File

@@ -0,0 +1,61 @@
# PostgreSQL instance basics
variable "postgresql_identifier" {
description = "PostgreSQL instance name (DB identifier)"
default = "{{ fqdn_id }}"
type = string
}
# PostgreSQL listening port; unquoted so the rendered default is a number,
# matching "type = number" (consistent with the MySQL variables file).
variable "port" {
  description = "PostgreSQL instance port"
  default = {{ database_port }}
  type = number
}
# Allocated storage in GiB; unquoted so the rendered default is a number,
# matching "type = number" (consistent with the MySQL variables file).
variable "disk_size" {
  description = "disk instance size"
  default = {{ database_disk_size_in_gib }}
  type = number
}
variable "postgresql_version" {
description = "Postgresql version"
default = "{{ version }}"
type = string
}
variable "storage_type" {
description = "One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD)."
default = "{{ database_disk_type }}"
type = string
}
variable "encrypt_disk" {
description = "Enable disk encryption"
default = "{{ encrypt_disk }}"
type = string
}
variable "instance_class" {
description = "Type of instance: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html"
default = "{{ database_instance_type }}"
type = string
}
variable "username" {
description = "Admin username for the master DB user"
default = "{{ database_login }}"
type = string
}
variable "password" {
description = "Admin password for the master DB user"
default = "{{ database_password }}"
type = string
}
variable "database_name" {
description = "The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance"
default = "{{ database_name }}"
type = string
}

View File

@@ -0,0 +1,7 @@
locals {
redis_database_tags = merge (var.database_tags, {
database_identifier = var.elasticache_identifier
creationDate = time_static.on_db_create.rfc3339
{% if snapshot is defined and snapshot["snapshot_id"] %}meta_last_restored_from = var.snapshot_identifier{% endif %}
})
}

View File

@@ -0,0 +1,114 @@
data "aws_vpc" "selected" {
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
}
data "aws_subnet_ids" "selected" {
vpc_id = data.aws_vpc.selected.id
filter {
name = "tag:ClusterId"
values = [var.kubernetes_cluster_id]
}
filter {
name = "tag:Service"
values = ["Elasticache"]
}
}
data "aws_security_group" "selected" {
filter {
name = "tag:Name"
values = ["qovery-eks-workers"]
}
filter {
name = "tag:kubernetes.io/cluster/${var.kubernetes_cluster_id}"
values = ["owned"]
}
}
# Deploys the "external-name-svc" chart in the application namespace, which
# presumably exposes the Elasticache endpoint to in-cluster workloads via a
# Kubernetes ExternalName Service (chart source not visible here — TODO confirm).
resource "helm_release" "elasticache_instance_external_name" {
name = "${aws_elasticache_cluster.elasticache_cluster.id}-externalname"
chart = "external-name-svc"
namespace = "{{namespace}}"
# atomic: roll back automatically if the release fails to deploy.
atomic = true
max_history = 50
# Address of the first (and, without cluster mode, only) cache node.
set {
name = "target_hostname"
value = aws_elasticache_cluster.elasticache_cluster.cache_nodes.0.address
}
set {
name = "source_fqdn"
value = "{{database_fqdn}}"
}
set {
name = "app_id"
value = "{{database_id}}"
}
set {
name = "service_name"
value = "{{service_name}}"
}
set {
name = "publicly_accessible"
value = var.publicly_accessible
}
# The cache node address above only exists once the cluster is created.
depends_on = [
aws_elasticache_cluster.elasticache_cluster
]
}
# Elasticache (Redis) cluster provisioned for a Qovery-managed database.
# Jinja2 conditionals below select between single-node and (future) replicated
# deployments and optional snapshot restore.
resource "aws_elasticache_cluster" "elasticache_cluster" {
cluster_id = var.elasticache_identifier
tags = local.redis_database_tags
# Elasticache instance basics
port = var.port
engine_version = var.elasticache_version
# AWS Elasticache does not follow SemVer for engine versions, which confuses
# the provider's version diffing; ignore engine_version drift until this
# upstream issue is fixed:
# https://github.com/hashicorp/terraform-provider-aws/issues/15625
lifecycle {
ignore_changes = [engine_version]
}
{%- if replication_group_id is defined %}
# TODO: add cluster mode and replicas support
{%- else %}
engine = "redis"
node_type = var.instance_class
num_cache_nodes = var.elasticache_instances_number
parameter_group_name = var.parameter_group_name
{%- endif %}
{%- if snapshot is defined and snapshot["snapshot_id"] %}
# Snapshot: restore the cluster from an existing snapshot.
snapshot_name = var.snapshot_identifier
{%- endif %}
# Network
# WARNING: this value can't be fetched from data sources and is tied to the
# naming convention used during the bootstrap phase.
subnet_group_name = "elasticache-${data.aws_vpc.selected.id}"
# Security
security_group_ids = data.aws_security_group.selected.*.id
# Maintenance and upgrades
apply_immediately = var.apply_changes_now
maintenance_window = var.preferred_maintenance_window
# Backups
snapshot_window = var.preferred_backup_window
snapshot_retention_limit = var.backup_retention_period
{%- if not skip_final_snapshot %}
final_snapshot_identifier = var.final_snapshot_name
{%- endif %}
}

View File

@@ -0,0 +1,37 @@
# elasticache instance basics — all defaults are injected by the Qovery template engine.
variable "elasticache_identifier" {
description = "Elasticache cluster name (Cluster identifier)"
default = "{{ fqdn_id }}"
type = string
}
# Redis engine version (AWS Elasticache versioning, not strict SemVer).
variable "elasticache_version" {
description = "Elasticache version"
default = "{{ version }}"
type = string
}
# Parameter group controlling Redis runtime settings.
variable "parameter_group_name" {
description = "Elasticache parameter group name"
default = "{{ database_elasticache_parameter_group_name }}"
type = string
}
# Number of cache nodes; 1 unless cluster mode is used.
variable "elasticache_instances_number" {
description = "Elasticache instance numbers"
default = 1
type = number
}
# Listening port; the template must render a bare number (unquoted on purpose).
variable "port" {
description = "Elasticache instance port"
default = {{ database_port }}
type = number
}
# Cache node size (e.g. cache.t3.micro); see the linked AWS documentation.
variable "instance_class" {
description = "Type of instance: https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html"
default = "{{database_instance_type}}"
type = string
}

View File

@@ -17,10 +17,14 @@ controller:
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
publishService:
enabled: true
service:
enabled: true
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
external-dns.alpha.kubernetes.io/hostname: "{{ wildcard_managed_dns }}"
externalTrafficPolicy: "Local"
sessionAffinity: ""
healthCheckNodePort: 0

View File

@@ -1,6 +1,6 @@
apiVersion: v1
appVersion: 3.13.4
appVersion: 3.19.1
description: A Helm chart for installing Calico on AWS
icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png
name: aws-calico
version: 0.3.1
version: 0.3.10

View File

@@ -1,7 +1,11 @@
# Calico on AWS
**Note**: The recommended way to install Calico on EKS is via the tigera-operator instead of this Helm chart.
You can follow https://docs.aws.amazon.com/eks/latest/userguide/calico.html for detailed instructions.
This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html
## Prerequisites
- Kubernetes 1.11+ running on AWS
@@ -38,26 +42,32 @@ If you receive an error similar to `Error: release aws-calico failed: <resource>
The following table lists the configurable parameters for this chart and their default values.
| Parameter | Description | Default |
|----------------------------------------|---------------------------------------------------------|---------------------------------|
| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` |
| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` |
| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` |
| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` |
| `calico.node.image` | Calico Node Image | `quay.io/calico/node` |
| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` |
| `calico.node.logseverity` | Calico Node Log Severity | `Info` |
| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` |
| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.tag` | Calico version | `v3.8.1` |
| `fullnameOverride` | Override the fullname of the chart | `calico` |
| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` |
| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` |
| Parameter | Description | Default |
|------------------------------------------|---------------------------------------------------------|---------------------------------|
| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` |
| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` |
| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` |
| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.typha.podAnnotations` | Calico Typha Node Pod Annotations | `{}` |
| `calico.typha.podLabels` | Calico Typha Node Pod Labels | `{}` |
| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` |
| `calico.node.image` | Calico Node Image | `quay.io/calico/node` |
| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` |
| `calico.node.logseverity` | Calico Node Log Severity | `Info` |
| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.node.podAnnotations` | Calico Node Pod Annotations | `{}` |
| `calico.node.podLabels` | Calico Node Pod Labels | `{}` |
| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` |
| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.typha_autoscaler.podAnnotations` | Calico Typha Autoscaler Pod Annotations | `{}` |
| `calico.typha_autoscaler.podLabels` | Calico Typha Autoscaler Pod Labels | `{}` |
| `calico.tag` | Calico version | `v3.8.1` |
| `fullnameOverride` | Override the fullname of the chart | `calico` |
| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` |
| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters:

View File

@@ -9,6 +9,9 @@ spec:
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
{{- if .Values.calico.node.podLabels }}
{{ toYaml .Values.calico.node.podLabels | indent 6 }}
{{- end }}
updateStrategy:
type: RollingUpdate
rollingUpdate:
@@ -17,8 +20,23 @@ spec:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
{{- if .Values.calico.node.podLabels }}
{{ toYaml .Values.calico.node.podLabels | indent 8 }}
{{- end }}
{{- with .Values.calico.node.podAnnotations }}
annotations: {{- toYaml . | nindent 8 }}
{{- end }}
spec:
priorityClassName: system-node-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "eks.amazonaws.com/compute-type"
operator: NotIn
values:
- fargate
nodeSelector:
{{- toYaml .Values.calico.node.nodeSelector | nindent 8 }}
hostNetwork: true
@@ -70,6 +88,8 @@ spec:
value: "none"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: FELIX_ROUTESOURCE
value: "WorkloadIPs"
- name: NO_DEFAULT_POOLS
value: "true"
# Set based on the k8s node name.
@@ -95,12 +115,14 @@ spec:
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 5
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
periodSeconds: 10
timeoutSeconds: 5
resources:
{{- toYaml .Values.calico.node.resources | nindent 12 }}
volumeMounts:

View File

@@ -10,12 +10,21 @@ spec:
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
{{- if .Values.calico.typha.podLabels }}
{{ toYaml .Values.calico.typha.podLabels | indent 6 }}
{{- end }}
template:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
{{- if .Values.calico.typha.podLabels }}
{{ toYaml .Values.calico.typha.podLabels | indent 8 }}
{{- end }}
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
{{- with .Values.calico.typha.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
priorityClassName: system-cluster-critical
nodeSelector:
@@ -24,9 +33,9 @@ spec:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
{{- if .Values.calico.typha.tolerations }}
{{ toYaml .Values.calico.typha.tolerations | indent 10 }}
{{- end }}
{{- if .Values.calico.typha.tolerations }}
{{- toYaml .Values.calico.typha.tolerations | nindent 8 }}
{{- end }}
hostNetwork: true
serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node"
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
@@ -97,11 +106,20 @@ spec:
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler"
{{- if .Values.calico.typha_autoscaler.podLabels }}
{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 6 }}
{{- end }}
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler"
{{- if .Values.calico.typha_autoscaler.podLabels }}
{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 8 }}
{{- end }}
{{- with .Values.calico.typha_autoscaler.podAnnotations }}
annotations: {{- toYaml . | nindent 8 }}
{{- end }}
spec:
priorityClassName: system-cluster-critical
nodeSelector:
@@ -110,9 +128,9 @@ spec:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
{{- if .Values.calico.typha_autoscaler.tolerations }}
{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }}
{{- end }}
{{- if .Values.calico.typha_autoscaler.tolerations }}
{{- toYaml .Values.calico.typha_autoscaler.tolerations | nindent 8 }}
{{- end }}
containers:
- image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}"
name: autoscaler

View File

@@ -15,6 +15,14 @@ rules:
- configmaps
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints

View File

@@ -7,7 +7,7 @@ podSecurityPolicy:
create: false
calico:
tag: v3.13.4
tag: v3.19.1
typha:
logseverity: Info #Debug, Info, Warning, Error, Fatal
@@ -22,6 +22,8 @@ calico:
tolerations: []
nodeSelector:
beta.kubernetes.io/os: linux
podAnnotations: {}
podLabels: {}
node:
logseverity: Info #Debug, Info, Warning, Error, Fatal
image: quay.io/calico/node
@@ -37,6 +39,8 @@ calico:
# value: 'some value'
nodeSelector:
beta.kubernetes.io/os: linux
podAnnotations: {}
podLabels: {}
typha_autoscaler:
resources:
requests:
@@ -48,7 +52,9 @@ calico:
tolerations: []
nodeSelector:
beta.kubernetes.io/os: linux
podAnnotations: {}
podLabels: {}
autoscaler:
tag: "1.7.1"
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64
tag: "1.8.3"
image: k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64

View File

@@ -20,3 +20,4 @@
.idea/
*.tmproj
.vscode/
example-values*.yaml

View File

@@ -1,27 +1,25 @@
apiVersion: v1
appVersion: 1.5.0
description: A Helm chart for the AWS Node Termination Handler
apiVersion: v2
appVersion: 1.14.1
description: A Helm chart for the AWS Node Termination Handler.
home: https://github.com/aws/eks-charts
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
keywords:
- aws
- eks
- ec2
- node-termination
- spot
kubeVersion: '>= 1.16-0'
maintainers:
- email: nckturner@users.noreply.github.com
name: Nicholas Turner
url: https://github.com/nckturner
- email: stefanprodan@users.noreply.github.com
name: Stefan Prodan
url: https://github.com/stefanprodan
- email: bwagner5@users.noreply.github.com
name: Brandon Wagner
url: https://github.com/bwagner5
- email: jillmon@users.noreply.github.com
name: Jillian Montalvo
name: Jillian Kuentz
url: https://github.com/jillmon
- email: mattrandallbecker@users.noreply.github.com
name: Matthew Becker
url: https://github.com/mattrandallbecker
name: aws-node-termination-handler
sources:
- https://github.com/aws/eks-charts
version: 0.8.0
- https://github.com/aws/aws-node-termination-handler/
- https://github.com/aws/eks-charts/
type: application
version: 0.16.1

View File

@@ -1,96 +1,170 @@
# AWS Node Termination Handler
AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler.
AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at [github.com/aws/aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler).
## Prerequisites
* Kubernetes >= 1.11
- _Kubernetes_ >= v1.16
## Installing the Chart
Add the EKS repository to Helm:
```sh
helm repo add eks https://aws.github.io/eks-charts
```
Install AWS Node Termination Handler:
To install the chart with the release name aws-node-termination-handler and default configuration:
Before you can install the chart you will need to add the `aws` repo to [Helm](https://helm.sh/).
```sh
helm install --name aws-node-termination-handler \
--namespace kube-system eks/aws-node-termination-handler
```shell
helm repo add eks https://aws.github.io/eks-charts/
```
To install into an EKS cluster where the Node Termination Handler is already installed, you can run:
After you've installed the repo you can install the chart, the following command will install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace.
```sh
helm upgrade --install --recreate-pods --force \
aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler
```shell
helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler
```
If you receive an error similar to `Error: release aws-node-termination-handler
failed: <resource> "aws-node-termination-handler" already exists`, simply rerun
the above command.
To install the chart on an EKS cluster where the AWS Node Termination Handler is already installed, you can run the following command.
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `aws-node-termination-handler` deployment:
```sh
helm delete --purge aws-node-termination-handler
```shell
helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler --recreate-pods --force
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
If you receive an error similar to the one below simply rerun the above command.
> Error: release aws-node-termination-handler failed: <resource> "aws-node-termination-handler" already exists
To uninstall the `aws-node-termination-handler` chart installation from the `kube-system` namespace run the following command.
```shell
helm delete --namespace kube-system aws-node-termination-handler
```
## Configuration
The following tables lists the configurable parameters of the chart and their default values.
The following tables lists the configurable parameters of the chart and their default values. These values are split up into the [common configuration](#common-configuration) shared by all AWS Node Termination Handler modes, [queue configuration](#queue-processor-mode-configuration) used when AWS Node Termination Handler is in queue-processor mode, and [IMDS configuration](#imds-mode-configuration) used when AWS Node Termination Handler is in IMDS mode; for more information about the different modes see the project [README](https://github.com/aws/aws-node-termination-handler/blob/main/README.md).
Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `amazon/aws-node-termination-handler`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`image.pullSecrets` | image pull secrets (for private docker registries) | `[]`
`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false`
`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30`
`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30`
`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120`
`ignoreDaemonsSets` | Causes kubectl to skip daemon set managed pods | `true`
`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80`
`webhookURL` | Posts event data to URL upon instance interruption action | ``
`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | ``
`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}`
`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}`
`dryRun` | If true, only log if a node would be drained | `false`
`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false`
`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true`
`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3`
`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. | `false`
`taintNode` | If true, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false`
`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false`
`affinity` | node/pod affinities | None
`podAnnotations` | annotations to add to each pod | `{}`
`priorityClassName` | Name of the priorityClass | `system-node-critical`
`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi`
`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet`
`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. | `{}`
`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`serviceAccount.create` | If `true`, create a new service account | `true`
`serviceAccount.name` | Service account to be used | None
`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}`
`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime`
`securityContext.runAsUserID` | User ID to run the container | `1000`
`securityContext.runAsGroupID` | Group ID to run the container | `1000`
`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os`
`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch`
`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false`
`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. | `9092`
### Common Configuration
## Metrics endpoint consideration
If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace
since it's listening on all interfaces.
Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
The configuration in this table applies to all AWS Node Termination Handler modes.
| Parameter | Description | Default |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- |
| `image.repository` | Image repository. | `public.ecr.aws/aws-ec2/aws-node-termination-handler` |
| `image.tag` | Image tag. | `v{{ .Chart.AppVersion}}` |
| `image.pullPolicy` | Image pull policy. | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets. | `[]` |
| `nameOverride` | Override the `name` of the chart. | `""` |
| `fullnameOverride` | Override the `fullname` of the chart. | `""` |
| `serviceAccount.create` | If `true`, create a new service account. | `true` |
| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` |
| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` |
| `rbac.create` | If `true`, create the RBAC resources. | `true` |
| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `true` |
| `customLabels` | Labels to add to all resource metadata. | `{}` |
| `podLabels` | Labels to add to the pod. | `{}` |
| `podAnnotations` | Annotations to add to the pod. | `{}` |
| `podSecurityContext` | Security context for the pod. | _See values.yaml_ |
| `securityContext` | Security context for the _aws-node-termination-handler_ container. | _See values.yaml_ |
| `terminationGracePeriodSeconds` | The termination grace period for the pod. | `nil` |
| `resources` | Resource requests and limits for the _aws-node-termination-handler_ container. | `{}` |
| `nodeSelector` | Expressions to select a node by it's labels for pod assignment. In IMDS mode this has a higher priority than `daemonsetNodeSelector` (for backwards compatibility) but shouldn't be used. | `{}` |
| `affinity` | Affinity settings for pod assignment. In IMDS mode this has a higher priority than `daemonsetAffinity` (for backwards compatibility) but shouldn't be used. | `{}` |
| `tolerations` | Tolerations for pod assignment. In IMDS mode this has a higher priority than `daemonsetTolerations` (for backwards compatibility) but shouldn't be used. | `[]` |
| `extraEnv` | Additional environment variables for the _aws-node-termination-handler_ container. | `[]` |
| `probes` | The Kubernetes liveness probe configuration. | _See values.yaml_ |
| `logLevel` | Sets the log level (`info`,`debug`, or `error`) | `info` |
| `jsonLogging` | If `true`, use JSON-formatted logs instead of human readable logs. | `false` |
| `enablePrometheusServer` | If `true`, start an http server exposing `/metrics` endpoint for _Prometheus_. | `false` |
| `prometheusServerPort` | Replaces the default HTTP port for exposing _Prometheus_ metrics. | `9092` |
| `dryRun` | If `true`, only log if a node would be drained. | `false` |
| `cordonOnly` | If `true`, nodes will be cordoned but not drained when an interruption event occurs. | `false` |
| `taintNode` | If `true`, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance`, `aws-node-termination-handler/spot-itn`, `aws-node-termination-handler/asg-lifecycle-termination` and `aws-node-termination-handler/rebalance-recommendation`. | `false` |
| `deleteLocalData` | If `true`, continue even if there are pods using local data that will be deleted when the node is drained. | `true` |
| `ignoreDaemonSets` | If `true`, skip terminating daemon set managed pods. | `true` |
| `podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used, which defaults to 30 seconds if not specified for the pod. | `-1` |
| `nodeTerminationGracePeriod` | Period of time in seconds given to each node to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` |
| `emitKubernetesEvents` | If `true`, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event. More information [here](https://github.com/aws/aws-node-termination-handler/blob/main/docs/kubernetes_events.md). | `false` |
| `kubernetesEventsExtraAnnotations` | A comma-separated list of `key=value` extra annotations to attach to all emitted Kubernetes events (e.g. `first=annotation,sample.annotation/number=two"`). | `""` |
| `webhookURL` | Posts event data to URL upon instance interruption action. | `""` |
| `webhookURLSecretName` | Pass the webhook URL as a Secret using the key `webhookurl`. | `""` |
| `webhookHeaders` | Replace the default webhook headers (e.g. `{"Content-type":"application/json"}`). | `""` |
| `webhookProxy` | Uses the specified HTTP(S) proxy for sending webhook data. | `""` |
| `webhookTemplate` | Replaces the default webhook message template (e.g. `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Instance: {{ .InstanceID }} - Node: {{ .NodeName }} - Description: {{ .Description }} - Start Time: {{ .StartTime }}"}`). | `""` |
| `webhookTemplateConfigMapName` | Pass the webhook template file as a configmap. | `""` |
| `webhookTemplateConfigMapKey` | Name of the Configmap key storing the template file. | `""` |
| `enableSqsTerminationDraining` | If `true`, this turns on queue-processor mode which drains nodes when an SQS termination event is received. | `false` |
### Queue-Processor Mode Configuration
The configuration in this table applies to AWS Node Termination Handler in queue-processor mode.
| Parameter | Description | Default |
| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- |
| `replicas` | The number of replicas in the deployment when using queue-processor mode (NOTE: increasing replicas may cause duplicate webhooks since pods are stateless). | `1` |
| `strategy` | Specify the update strategy for the deployment. | `{}` |
| `podDisruptionBudget` | Limit the disruption for controller pods, requires at least 2 controller replicas. | `{}` |
| `serviceMonitor.create` | If `true`, create a ServiceMonitor. This requires `enablePrometheusServer: true`. | `false` |
| `serviceMonitor.namespace` | Override ServiceMonitor _Helm_ release namespace. | `nil` |
| `serviceMonitor.labels` | Additional ServiceMonitor metadata labels. | `{}` |
| `serviceMonitor.interval` | _Prometheus_ scrape interval. | `30s` |
| `serviceMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` |
| `priorityClassName` | Name of the PriorityClass to use for the Deployment. | `system-cluster-critical` |
| `awsRegion` | If specified, use the AWS region for AWS API calls, else NTH will try to find the region through the `AWS_REGION` environment variable, IMDS, or the specified queue URL. | `""` |
| `queueURL` | Listens for messages on the specified SQS queue URL. | `""` |
| `workers` | The maximum amount of parallel event processors to handle concurrent events. | `10` |
| `checkASGTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. | `true` |
| `managedAsgTag` | The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
| `assumeAsgTagPropagation` | If `true`, assume that ASG tags will appear on the ASG's instances. | `false` |
### IMDS Mode Configuration
The configuration in this table applies to AWS Node Termination Handler in IMDS mode.
| Parameter | Description | Default |
| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `targetNodeOs` | Space separated list of node OS's to target (e.g. `"linux"`, `"windows"`, `"linux windows"`). Windows support is **EXPERIMENTAL**. | `"linux"` |
| `linuxPodLabels` | Labels to add to each Linux pod. | `{}` |
| `windowsPodLabels` | Labels to add to each Windows pod. | `{}` |
| `linuxPodAnnotations` | Annotations to add to each Linux pod. | `{}` |
| `windowsPodAnnotations` | Annotations to add to each Windows pod. | `{}` |
| `updateStrategy` | Update strategy for the all DaemonSets. | _See values.yaml_ |
| `daemonsetPriorityClassName` | Name of the PriorityClass to use for all DaemonSets. | `system-node-critical` |
| `podMonitor.create` | If `true`, create a PodMonitor. This requires `enablePrometheusServer: true`. | `false` |
| `podMonitor.namespace` | Override PodMonitor _Helm_ release namespace. | `nil` |
| `podMonitor.labels` | Additional PodMonitor metadata labels | `{}` |
| `podMonitor.interval` | _Prometheus_ scrape interval. | `30s` |
| `podMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` |
| `useHostNetwork` | If `true`, enables `hostNetwork` for the Linux DaemonSet. NOTE: setting this to `false` may cause issues accessing IMDSv2 if your account is not configured with an IP hop count of 2 see [Metrics Endpoint Considerations](#metrics-endpoint-considerations) | `true` |
| `dnsPolicy` | If specified, this overrides `linuxDnsPolicy` and `windowsDnsPolicy` with a single policy. | `""` |
| `linuxDnsPolicy` | DNS policy for the Linux DaemonSet. | `""` |
| `windowsDnsPolicy` | DNS policy for the Windows DaemonSet. | `""` |
| `daemonsetNodeSelector` | Expressions to select a node by its labels for DaemonSet pod assignment. For backwards compatibility the `nodeSelector` value has priority over this but shouldn't be used. | `{}` |
| `linuxNodeSelector` | Override `daemonsetNodeSelector` for the Linux DaemonSet. | `{}` |
| `windowsNodeSelector` | Override `daemonsetNodeSelector` for the Windows DaemonSet. | `{}` |
| `daemonsetAffinity` | Affinity settings for DaemonSet pod assignment. For backwards compatibility the `affinity` has priority over this but shouldn't be used. | `{}` |
| `linuxAffinity` | Override `daemonsetAffinity` for the Linux DaemonSet. | `{}` |
| `windowsAffinity` | Override `daemonsetAffinity` for the Windows DaemonSet. | `{}` |
| `daemonsetTolerations` | Tolerations for DaemonSet pod assignment. For backwards compatibility the `tolerations` has priority over this but shouldn't be used. | `[]` |
| `linuxTolerations` | Override `daemonsetTolerations` for the Linux DaemonSet. | `[]` |
| `windowsTolerations` | Override `daemonsetTolerations` for the Windows DaemonSet. | `[]` |
| `enableProbesServer` | If `true`, start an http server exposing `/healthz` endpoint for probes. | `false` |
| `metadataTries` | The number of times to try requesting metadata. | `3` |
| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. | `true` |
| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. This is **EXPERIMENTAL**. | `false` |
| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. | `false` |
| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. | `false` |
### Testing Configuration
The configuration in this table applies to AWS Node Termination Handler testing and is **NOT RECOMMENDED** FOR PRODUCTION DEPLOYMENTS.
| Parameter | Description | Default |
| --------------------- | --------------------------------------------------------------------------------- | -------------- |
| `awsEndpoint` | (Used for testing) If specified, use the provided AWS endpoint to make API calls. | `""` |
| `awsSecretAccessKey` | (Used for testing) Pass-thru environment variable. | `nil` |
| `awsAccessKeyID` | (Used for testing) Pass-thru environment variable. | `nil` |
| `instanceMetadataURL` | (Used for testing) If specified, use the provided metadata URL. | `""` |
| `procUptimeFile` | (Used for Testing) Specify the uptime file. | `/proc/uptime` |
## Metrics Endpoint Considerations
AWS Node Termination Handler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
You can switch NTH in IMDS mode to run w/ `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html)).

View File

@@ -1,4 +1,5 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
@@ -25,17 +26,11 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{/*
Common labels
Equivalent to "aws-node-termination-handler.fullname" except that "-win" indicator is appended to the end.
Name will not exceed 63 characters.
*/}}
{{- define "aws-node-termination-handler.labels" -}}
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node-termination-handler
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- define "aws-node-termination-handler.fullnameWindows" -}}
{{- include "aws-node-termination-handler.fullname" . | trunc 59 | trimSuffix "-" | printf "%s-win" -}}
{{- end -}}
{{/*
@@ -45,6 +40,47 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-node-termination-handler.labels" -}}
{{ include "aws-node-termination-handler.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/component: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }}
{{- with .Values.customLabels }}
{{ toYaml . }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "aws-node-termination-handler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Selector labels for the deployment
*/}}
{{- define "aws-node-termination-handler.selectorLabelsDeployment" -}}
{{ include "aws-node-termination-handler.selectorLabels" . }}
app.kubernetes.io/component: deployment
{{- end -}}
{{/*
Selector labels for the daemonset
*/}}
{{- define "aws-node-termination-handler.selectorLabelsDaemonset" -}}
{{ include "aws-node-termination-handler.selectorLabels" . }}
app.kubernetes.io/component: daemonset
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
@@ -55,3 +91,19 @@ Create the name of the service account to use
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
The image to use
*/}}
{{- define "aws-node-termination-handler.image" -}}
{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- end }}
{{/* Get PodDisruptionBudget API Version */}}
{{- define "aws-node-termination-handler.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}

View File

@@ -1,7 +1,10 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
@@ -9,6 +12,7 @@ rules:
- nodes
verbs:
- get
- list
- patch
- update
- apiGroups:
@@ -17,6 +21,7 @@ rules:
- pods
verbs:
- list
- get
- apiGroups:
- ""
resources:
@@ -35,3 +40,13 @@ rules:
- daemonsets
verbs:
- get
{{- if .Values.emitKubernetesEvents }}
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
{{- end }}
{{- end -}}

View File

@@ -1,12 +1,16 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "aws-node-termination-handler.fullname" . }}
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,198 @@
{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "linux") -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
spec:
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }}
kubernetes.io/os: linux
template:
metadata:
labels:
{{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }}
kubernetes.io/os: linux
k8s-app: aws-node-termination-handler
{{- with (mergeOverwrite (dict) .Values.podLabels .Values.linuxPodLabels) }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.podAnnotations .Values.linuxPodAnnotations }}
annotations:
{{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.linuxPodAnnotations) | nindent 8 }}
{{- end }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.daemonsetPriorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.useHostNetwork }}
dnsPolicy: {{ default .Values.linuxDnsPolicy .Values.dnsPolicy }}
containers:
- name: aws-node-termination-handler
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: {{ include "aws-node-termination-handler.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: {{ .Values.enableProbesServer | quote }}
- name: PROBES_SERVER_PORT
value: {{ .Values.probes.httpGet.port | quote }}
- name: PROBES_SERVER_ENDPOINT
value: {{ .Values.probes.httpGet.path | quote }}
- name: LOG_LEVEL
value: {{ .Values.logLevel | quote }}
- name: JSON_LOGGING
value: {{ .Values.jsonLogging | quote }}
- name: ENABLE_PROMETHEUS_SERVER
value: {{ .Values.enablePrometheusServer | quote }}
- name: PROMETHEUS_SERVER_PORT
value: {{ .Values.prometheusServerPort | quote }}
{{- with .Values.instanceMetadataURL }}
- name: INSTANCE_METADATA_URL
value: {{ . | quote }}
{{- end }}
- name: METADATA_TRIES
value: {{ .Values.metadataTries | quote }}
- name: DRY_RUN
value: {{ .Values.dryRun | quote }}
- name: CORDON_ONLY
value: {{ .Values.cordonOnly | quote }}
- name: TAINT_NODE
value: {{ .Values.taintNode | quote }}
- name: DELETE_LOCAL_DATA
value: {{ .Values.deleteLocalData | quote }}
- name: IGNORE_DAEMON_SETS
value: {{ .Values.ignoreDaemonSets | quote }}
- name: POD_TERMINATION_GRACE_PERIOD
value: {{ .Values.podTerminationGracePeriod | quote }}
- name: NODE_TERMINATION_GRACE_PERIOD
value: {{ .Values.nodeTerminationGracePeriod | quote }}
- name: EMIT_KUBERNETES_EVENTS
value: {{ .Values.emitKubernetesEvents | quote }}
{{- with .Values.kubernetesEventsExtraAnnotations }}
- name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS
value: {{ . | quote }}
{{- end }}
{{- if or .Values.webhookURL .Values.webhookURLSecretName }}
- name: WEBHOOK_URL
{{- if .Values.webhookURLSecretName }}
valueFrom:
secretKeyRef:
name: {{ .Values.webhookURLSecretName }}
key: webhookurl
{{- else }}
value: {{ .Values.webhookURL | quote }}
{{- end }}
{{- end }}
{{- with .Values.webhookHeaders }}
- name: WEBHOOK_HEADERS
value: {{ . | quote }}
{{- end }}
{{- with .Values.webhookProxy }}
- name: WEBHOOK_PROXY
value: {{ . | quote }}
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
- name: WEBHOOK_TEMPLATE_FILE
value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }}
{{- else if .Values.webhookTemplate }}
- name: WEBHOOK_TEMPLATE
value: {{ .Values.webhookTemplate | quote }}
{{- end }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: {{ .Values.enableSpotInterruptionDraining | quote }}
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: {{ .Values.enableScheduledEventDraining | quote }}
- name: ENABLE_REBALANCE_MONITORING
value: {{ .Values.enableRebalanceMonitoring | quote }}
- name: ENABLE_REBALANCE_DRAINING
value: {{ .Values.enableRebalanceDraining | quote }}
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
- name: UPTIME_FROM_FILE
value: {{ .Values.procUptimeFile | quote }}
{{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
ports:
{{- if .Values.enableProbesServer }}
- name: liveness-probe
protocol: TCP
containerPort: {{ .Values.probes.httpGet.port }}
{{- end }}
{{- if .Values.enablePrometheusServer }}
- name: http-metrics
protocol: TCP
containerPort: {{ .Values.prometheusServerPort }}
{{- end }}
{{- end }}
{{- if .Values.enableProbesServer }}
livenessProbe:
{{- toYaml .Values.probes | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: uptime
mountPath: {{ .Values.procUptimeFile }}
readOnly: true
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
- name: webhook-template
mountPath: /config/
{{- end }}
volumes:
- name: uptime
hostPath:
path: {{ .Values.procUptimeFile | default "/proc/uptime" }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
- name: webhook-template
configMap:
name: {{ .Values.webhookTemplateConfigMapName }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.linuxNodeSelector) }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.linuxAffinity) }}
affinity:
{{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.linuxAffinity)) | nindent 8 }}
{{- end }}
{{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.linuxTolerations) }}
tolerations:
{{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.linuxTolerations )) | nindent 8 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,192 @@
{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "windows") -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "aws-node-termination-handler.fullnameWindows" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
spec:
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }}
kubernetes.io/os: windows
template:
metadata:
labels:
{{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }}
kubernetes.io/os: windows
k8s-app: aws-node-termination-handler
{{- with (mergeOverwrite (dict) .Values.podLabels .Values.windowsPodLabels) }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.podAnnotations .Values.windowsPodAnnotations }}
annotations:
{{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.windowsPodAnnotations) | nindent 8 }}
{{- end }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.daemonsetPriorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: false
dnsPolicy: {{ default .Values.windowsDnsPolicy .Values.dnsPolicy }}
containers:
- name: aws-node-termination-handler
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: {{ include "aws-node-termination-handler.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: {{ .Values.enableProbesServer | quote }}
- name: PROBES_SERVER_PORT
value: {{ .Values.probes.httpGet.port | quote }}
- name: PROBES_SERVER_ENDPOINT
value: {{ .Values.probes.httpGet.path | quote }}
- name: LOG_LEVEL
value: {{ .Values.logLevel | quote }}
- name: JSON_LOGGING
value: {{ .Values.jsonLogging | quote }}
- name: ENABLE_PROMETHEUS_SERVER
value: {{ .Values.enablePrometheusServer | quote }}
- name: PROMETHEUS_SERVER_PORT
value: {{ .Values.prometheusServerPort | quote }}
{{- with .Values.instanceMetadataURL }}
- name: INSTANCE_METADATA_URL
value: {{ . | quote }}
{{- end }}
- name: METADATA_TRIES
value: {{ .Values.metadataTries | quote }}
- name: DRY_RUN
value: {{ .Values.dryRun | quote }}
- name: CORDON_ONLY
value: {{ .Values.cordonOnly | quote }}
- name: TAINT_NODE
value: {{ .Values.taintNode | quote }}
- name: DELETE_LOCAL_DATA
value: {{ .Values.deleteLocalData | quote }}
- name: IGNORE_DAEMON_SETS
value: {{ .Values.ignoreDaemonSets | quote }}
- name: POD_TERMINATION_GRACE_PERIOD
value: {{ .Values.podTerminationGracePeriod | quote }}
- name: NODE_TERMINATION_GRACE_PERIOD
value: {{ .Values.nodeTerminationGracePeriod | quote }}
- name: EMIT_KUBERNETES_EVENTS
value: {{ .Values.emitKubernetesEvents | quote }}
{{- with .Values.kubernetesEventsExtraAnnotations }}
- name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS
value: {{ . | quote }}
{{- end }}
{{- if or .Values.webhookURL .Values.webhookURLSecretName }}
- name: WEBHOOK_URL
{{- if .Values.webhookURLSecretName }}
valueFrom:
secretKeyRef:
name: {{ .Values.webhookURLSecretName }}
key: webhookurl
{{- else }}
value: {{ .Values.webhookURL | quote }}
{{- end }}
{{- end }}
{{- with .Values.webhookHeaders }}
- name: WEBHOOK_HEADERS
value: {{ . | quote }}
{{- end }}
{{- with .Values.webhookProxy }}
- name: WEBHOOK_PROXY
value: {{ . | quote }}
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
- name: WEBHOOK_TEMPLATE_FILE
value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }}
{{- else if .Values.webhookTemplate }}
- name: WEBHOOK_TEMPLATE
value: {{ .Values.webhookTemplate | quote }}
{{- end }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: {{ .Values.enableSpotInterruptionDraining | quote }}
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: {{ .Values.enableScheduledEventDraining | quote }}
- name: ENABLE_REBALANCE_MONITORING
value: {{ .Values.enableRebalanceMonitoring | quote }}
- name: ENABLE_REBALANCE_DRAINING
value: {{ .Values.enableRebalanceDraining | quote }}
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "false"
{{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
ports:
{{- if .Values.enableProbesServer }}
- name: liveness-probe
protocol: TCP
containerPort: {{ .Values.probes.httpGet.port }}
hostPort: {{ .Values.probes.httpGet.port }}
{{- end }}
{{- if .Values.enablePrometheusServer }}
- name: http-metrics
protocol: TCP
containerPort: {{ .Values.prometheusServerPort }}
hostPort: {{ .Values.prometheusServerPort }}
{{- end }}
{{- end }}
{{- if .Values.enableProbesServer }}
livenessProbe:
{{- toYaml .Values.probes | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
volumeMounts:
- name: webhook-template
mountPath: /config/
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
volumes:
- name: webhook-template
configMap:
name: {{ .Values.webhookTemplateConfigMapName }}
{{- end }}
nodeSelector:
kubernetes.io/os: windows
{{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.windowsNodeSelector) }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.windowsAffinity) }}
affinity:
{{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.windowsAffinity )) | nindent 8 }}
{{- end }}
{{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.windowsTolerations) }}
tolerations:
{{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.windowsTolerations )) | nindent 8 }}
{{- end }}
{{- end -}}

View File

@@ -1,141 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
spec:
updateStrategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node-termination-handler
spec:
volumes:
- name: "uptime"
hostPath:
path: "{{ .Values.procUptimeFile }}"
priorityClassName: "{{ .Values.priorityClassName }}"
affinity:
nodeAffinity:
# NOTE(jaypipes): Change when we complete
# https://github.com/aws/aws-node-termination-handler/issues/8
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }}
operator: In
values:
- linux
- key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }}
operator: In
values:
- amd64
- arm
- arm64
serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }}
hostNetwork: true
dnsPolicy: {{ .Values.dnsPolicy }}
containers:
- name: {{ include "aws-node-termination-handler.name" . }}
image: {{ .Values.image.repository}}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{ .Values.securityContext.runAsUserID }}
runAsGroup: {{ .Values.securityContext.runAsGroupID }}
allowPrivilegeEscalation: false
volumeMounts:
- name: "uptime"
mountPath: "/proc/uptime"
readOnly: true
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SPOT_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DELETE_LOCAL_DATA
value: {{ .Values.deleteLocalData | quote }}
- name: IGNORE_DAEMON_SETS
value: {{ .Values.ignoreDaemonSets | quote }}
- name: GRACE_PERIOD
value: {{ .Values.gracePeriod | quote }}
- name: POD_TERMINATION_GRACE_PERIOD
value: {{ .Values.podTerminationGracePeriod | quote }}
- name: INSTANCE_METADATA_URL
value: {{ .Values.instanceMetadataURL | quote }}
- name: NODE_TERMINATION_GRACE_PERIOD
value: {{ .Values.nodeTerminationGracePeriod | quote }}
- name: WEBHOOK_URL
value: {{ .Values.webhookURL | quote }}
- name: WEBHOOK_HEADERS
value: {{ .Values.webhookHeaders | quote }}
- name: WEBHOOK_TEMPLATE
value: {{ .Values.webhookTemplate | quote }}
- name: DRY_RUN
value: {{ .Values.dryRun | quote }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: {{ .Values.enableSpotInterruptionDraining | quote }}
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: {{ .Values.enableScheduledEventDraining | quote }}
- name: METADATA_TRIES
value: {{ .Values.metadataTries | quote }}
- name: CORDON_ONLY
value: {{ .Values.cordonOnly | quote }}
- name: TAINT_NODE
value: {{ .Values.taintNode | quote }}
- name: JSON_LOGGING
value: {{ .Values.jsonLogging | quote }}
- name: WEBHOOK_PROXY
value: {{ .Values.webhookProxy | quote }}
- name: ENABLE_PROMETHEUS_SERVER
value: {{ .Values.enablePrometheusServer | quote }}
- name: PROMETHEUS_SERVER_PORT
value: {{ .Values.prometheusServerPort | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,202 @@
{{- if .Values.enableSqsTerminationDraining }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicas }}
{{- with .Values.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }}
template:
metadata:
labels:
{{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 8 }}
k8s-app: aws-node-termination-handler
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
containers:
- name: aws-node-termination-handler
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: {{ include "aws-node-termination-handler.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "true"
- name: PROBES_SERVER_PORT
value: {{ .Values.probes.httpGet.port | quote }}
- name: PROBES_SERVER_ENDPOINT
value: {{ .Values.probes.httpGet.path | quote }}
- name: LOG_LEVEL
value: {{ .Values.logLevel | quote }}
- name: JSON_LOGGING
value: {{ .Values.jsonLogging | quote }}
- name: ENABLE_PROMETHEUS_SERVER
value: {{ .Values.enablePrometheusServer | quote }}
- name: PROMETHEUS_SERVER_PORT
value: {{ .Values.prometheusServerPort | quote }}
- name: CHECK_ASG_TAG_BEFORE_DRAINING
value: {{ .Values.checkASGTagBeforeDraining | quote }}
- name: MANAGED_ASG_TAG
value: {{ .Values.managedAsgTag | quote }}
- name: ASSUME_ASG_TAG_PROPAGATION
value: {{ .Values.assumeAsgTagPropagation | quote }}
- name: DRY_RUN
value: {{ .Values.dryRun | quote }}
- name: CORDON_ONLY
value: {{ .Values.cordonOnly | quote }}
- name: TAINT_NODE
value: {{ .Values.taintNode | quote }}
- name: DELETE_LOCAL_DATA
value: {{ .Values.deleteLocalData | quote }}
- name: IGNORE_DAEMON_SETS
value: {{ .Values.ignoreDaemonSets | quote }}
- name: POD_TERMINATION_GRACE_PERIOD
value: {{ .Values.podTerminationGracePeriod | quote }}
- name: NODE_TERMINATION_GRACE_PERIOD
value: {{ .Values.nodeTerminationGracePeriod | quote }}
- name: EMIT_KUBERNETES_EVENTS
value: {{ .Values.emitKubernetesEvents | quote }}
{{- with .Values.kubernetesEventsExtraAnnotations }}
- name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS
value: {{ . | quote }}
{{- end }}
{{- if or .Values.webhookURL .Values.webhookURLSecretName }}
- name: WEBHOOK_URL
{{- if .Values.webhookURLSecretName }}
valueFrom:
secretKeyRef:
name: {{ .Values.webhookURLSecretName }}
key: webhookurl
{{- else }}
value: {{ .Values.webhookURL | quote }}
{{- end }}
{{- end }}
{{- with .Values.webhookHeaders }}
- name: WEBHOOK_HEADERS
value: {{ . | quote }}
{{- end }}
{{- with .Values.webhookProxy }}
- name: WEBHOOK_PROXY
value: {{ . | quote }}
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
- name: WEBHOOK_TEMPLATE_FILE
value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }}
{{- else if .Values.webhookTemplate }}
- name: WEBHOOK_TEMPLATE
value: {{ .Values.webhookTemplate | quote }}
{{- end }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: "false"
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: "false"
- name: ENABLE_REBALANCE_MONITORING
value: "false"
- name: ENABLE_REBALANCE_DRAINING
value: "false"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "true"
{{- with .Values.awsRegion }}
- name: AWS_REGION
value: {{ . | quote }}
{{- end }}
{{- with .Values.awsEndpoint }}
- name: AWS_ENDPOINT
value: {{ . | quote }}
{{- end }}
{{- if and .Values.awsAccessKeyID .Values.awsSecretAccessKey }}
- name: AWS_ACCESS_KEY_ID
value: {{ .Values.awsAccessKeyID | quote }}
- name: AWS_SECRET_ACCESS_KEY
value: {{ .Values.awsSecretAccessKey | quote }}
{{- end }}
- name: QUEUE_URL
value: {{ .Values.queueURL | quote }}
- name: WORKERS
value: {{ .Values.workers | quote }}
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: liveness-probe
protocol: TCP
containerPort: {{ .Values.probes.httpGet.port }}
{{- if .Values.enablePrometheusServer }}
- name: http-metrics
protocol: TCP
containerPort: {{ .Values.prometheusServerPort }}
{{- end }}
livenessProbe:
{{- toYaml .Values.probes | nindent 12 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
volumeMounts:
- name: webhook-template
mountPath: /config/
{{- end }}
{{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }}
volumes:
- name: webhook-template
configMap:
name: {{ .Values.webhookTemplateConfigMapName }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,13 @@
{{- if and .Values.enableSqsTerminationDraining (and .Values.podDisruptionBudget (gt (int .Values.replicas) 1)) }}
apiVersion: {{ include "aws-node-termination-handler.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }}
{{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end }}

View File

@@ -0,0 +1,31 @@
{{- if and (not .Values.enableSqsTerminationDraining) (and .Values.enablePrometheusServer .Values.podMonitor.create) -}}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}
{{- if .Values.podMonitor.namespace }}
namespace: {{ .Values.podMonitor.namespace }}
{{- end }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
{{- with .Values.podMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/name
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- port: http-metrics
path: /metrics
{{- with .Values.podMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.podMonitor.sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }}
{{- end -}}

View File

@@ -4,14 +4,25 @@ kind: PodSecurityPolicy
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: false
hostIPC: false
hostNetwork: true
hostNetwork: {{ .Values.useHostNetwork }}
hostPID: false
{{- if and (and (not .Values.enableSqsTerminationDraining) .Values.useHostNetwork ) (or .Values.enablePrometheusServer .Values.enableProbesServer) }}
hostPorts:
{{- if .Values.enablePrometheusServer }}
- min: {{ .Values.prometheusServerPort }}
max: {{ .Values.prometheusServerPort }}
{{- end }}
{{- if .Values.enableProbesServer }}
- min: {{ .Values.probesServerPort }}
max: {{ .Values.probesServerPort }}
{{- end }}
{{- end }}
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
allowedCapabilities:
@@ -27,12 +38,13 @@ spec:
volumes:
- '*'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
namespace: {{ .Release.Namespace }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
@@ -44,11 +56,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
namespace: {{ .Release.Namespace }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
kind: Role
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
subjects:
- kind: ServiceAccount

View File

@@ -0,0 +1,17 @@
{{- if and .Values.enableSqsTerminationDraining .Values.enablePrometheusServer -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
spec:
type: ClusterIP
selector:
{{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 4 }}
ports:
- name: http-metrics
port: {{ .Values.prometheusServerPort }}
targetPort: http-metrics
protocol: TCP
{{- end -}}

View File

@@ -3,11 +3,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,31 @@
{{- if and .Values.enableSqsTerminationDraining (and .Values.enablePrometheusServer .Values.serviceMonitor.create) -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- end }}
labels:
{{- include "aws-node-termination-handler.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/name
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: http-metrics
path: /metrics
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
selector:
matchLabels:
{{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }}
{{- end -}}

View File

@@ -3,100 +3,277 @@
# Declare variables to be passed into your templates.
image:
repository: amazon/aws-node-termination-handler
tag: v1.5.0
repository: public.ecr.aws/aws-ec2/aws-node-termination-handler
# Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }}
tag: ""
pullPolicy: IfNotPresent
pullSecrets: []
securityContext:
runAsUserID: 1000
runAsGroupID: 1000
nameOverride: ""
fullnameOverride: ""
priorityClassName: system-node-critical
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use. If name is not set and create is true, a name is generated using fullname template
name:
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies if PodSecurityPolicy resources should be created
pspEnabled: true
customLabels: {}
podLabels: {}
podAnnotations: {}
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "100m"
podSecurityContext:
fsGroup: 1000
## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is received
enableSpotInterruptionDraining: ""
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
runAsUser: 1000
runAsGroup: 1000
## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event
enableScheduledEventDraining: ""
terminationGracePeriodSeconds:
taintNode: false
resources: {}
## dryRun tells node-termination-handler to only log calls to kubernetes control plane
nodeSelector: {}
affinity: {}
tolerations: []
# Extra environment variables
extraEnv: []
# Liveness probe settings
probes:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
# Set the log level
logLevel: info
# Log messages in JSON format
jsonLogging: false
enablePrometheusServer: false
prometheusServerPort: 9092
# dryRun tells node-termination-handler to only log calls to kubernetes control plane
dryRun: false
# Cordon but do not drain nodes upon spot interruption termination notice.
cordonOnly: false
# Taint node upon spot interruption termination notice.
taintNode: false
# deleteLocalData tells kubectl to continue even if there are pods using
# emptyDir (local data that will be deleted when the node is drained).
deleteLocalData: ""
deleteLocalData: true
# ignoreDaemonSets causes kubectl to skip Daemon Set managed pods.
ignoreDaemonSets: ""
ignoreDaemonSets: true
# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully.
# If negative, the default value specified in the pod will be used.
gracePeriod: ""
podTerminationGracePeriod: ""
# podTerminationGracePeriod is time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
podTerminationGracePeriod: -1
# nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event.
nodeTerminationGracePeriod: ""
nodeTerminationGracePeriod: 120
# emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event
emitKubernetesEvents: false
# kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events
# Example: "first=annotation,sample.annotation/number=two"
kubernetesEventsExtraAnnotations: ""
# webhookURL if specified, posts event data to URL upon instance interruption action.
webhookURL: ""
# webhookProxy if specified, uses this HTTP(S) proxy configuration.
webhookProxy: ""
# Webhook URL will be fetched from the secret store using the given name.
webhookURLSecretName: ""
# webhookHeaders if specified, replaces the default webhook headers.
webhookHeaders: ""
# webhookProxy if specified, uses this HTTP(S) proxy configuration.
webhookProxy: ""
# webhookTemplate if specified, replaces the default webhook message template.
webhookTemplate: ""
# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80)
# webhook template file will be fetched from given config map name
# if specified, replaces the default webhook message with the content of the template file
webhookTemplateConfigMapName: ""
# template file name stored in configmap
webhookTemplateConfigMapKey: ""
# enableSqsTerminationDraining If true, this turns on queue-processor mode which drains nodes when an SQS termination event is received
enableSqsTerminationDraining: false
# ---------------------------------------------------------------------------------------------------------------------
# Queue Processor Mode
# ---------------------------------------------------------------------------------------------------------------------
# The number of replicas in the NTH deployment when using queue-processor mode (NOTE: increasing this may cause duplicate webhooks since NTH pods are stateless)
replicas: 1
# Specify the update strategy for the deployment
strategy: {}
# podDisruptionBudget specifies the disruption budget for the controller pods.
# Disruption budget will be configured only when `replicas` is greater than 1
podDisruptionBudget: {}
# maxUnavailable: 1
serviceMonitor:
# Specifies whether ServiceMonitor should be created
# this needs enableSqsTerminationDraining: true
# and enablePrometheusServer: true
create: false
# Specifies whether the ServiceMonitor should be created in a different namespace than
# the Helm release
namespace:
# Additional labels to add to the metadata
labels: {}
# The Prometheus scrape interval
interval: 30s
# The number of scraped samples that will be accepted
sampleLimit: 5000
priorityClassName: system-cluster-critical
# If specified, use the AWS region for AWS API calls
awsRegion: ""
# Listens for messages on the specified SQS queue URL
queueURL: ""
# The maximum amount of parallel event processors to handle concurrent events
workers: 10
# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node
checkASGTagBeforeDraining: true
# The tag to ensure is on a node if checkASGTagBeforeDraining is true
managedAsgTag: "aws-node-termination-handler/managed"
# If true, assume that ASG tags will appear on the ASG's instances
assumeAsgTagPropagation: false
# ---------------------------------------------------------------------------------------------------------------------
# IMDS Mode
# ---------------------------------------------------------------------------------------------------------------------
# Create node OS specific daemonset(s). (e.g. "linux", "windows", "linux windows")
targetNodeOs: linux
linuxPodLabels: {}
windowsPodLabels: {}
linuxPodAnnotations: {}
windowsPodAnnotations: {}
# K8s DaemonSet update strategy.
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
daemonsetPriorityClassName: system-node-critical
podMonitor:
# Specifies whether PodMonitor should be created
# this needs enableSqsTerminationDraining: false
# and enablePrometheusServer: true
create: false
# Specifies whether the PodMonitor should be created in a different namespace than
# the Helm release
namespace:
# Additional labels to add to the metadata
labels: {}
# The Prometheus scrape interval
interval: 30s
# The number of scraped samples that will be accepted
sampleLimit: 5000
# Determines if NTH uses host networking for Linux when running the DaemonSet (only IMDS mode; queue-processor never runs with host networking)
# If you have disabled IMDSv1 and are relying on IMDSv2, you'll need to increase the IP hop count to 2 before switching this to false
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
useHostNetwork: true
# Daemonset DNS policy
dnsPolicy: ""
linuxDnsPolicy: ClusterFirstWithHostNet
windowsDnsPolicy: ClusterFirst
daemonsetNodeSelector: {}
linuxNodeSelector: {}
windowsNodeSelector: {}
daemonsetAffinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "eks.amazonaws.com/compute-type"
operator: NotIn
values:
- fargate
linuxAffinity: {}
windowsAffinity: {}
daemonsetTolerations:
- operator: Exists
linuxTolerations: []
windowsTolerations: []
# If the probes server is running for the Daemonset
enableProbesServer: false
# Total number of times to try making the metadata request before failing.
metadataTries: 3
# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received
enableSpotInterruptionDraining: true
# enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event
enableScheduledEventDraining: false
# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received
enableRebalanceMonitoring: false
# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received
enableRebalanceDraining: false
# ---------------------------------------------------------------------------------------------------------------------
# Testing
# ---------------------------------------------------------------------------------------------------------------------
# (TESTING USE): If specified, use the provided AWS endpoint to make API calls.
awsEndpoint: ""
# (TESTING USE): These should only be used for testing w/ localstack!
awsAccessKeyID:
awsSecretAccessKey:
# (TESTING USE): Override the default metadata URL (default: http://169.254.169.254:80)
instanceMetadataURL: ""
# (TESTING USE): Mount path for uptime file
procUptimeFile: "/proc/uptime"
# nodeSelector tells the daemonset where to place the node-termination-handler
# pods. By default, this value is empty and every node will receive a pod.
nodeSelector: {}
nodeSelectorTermsOs: ""
nodeSelectorTermsArch: ""
enablePrometheusServer: false
prometheusServerPort: "9092"
tolerations:
- operator: "Exists"
affinity: {}
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use. If name is not set and create is true,
# a name is generated using fullname template
name:
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
rbac:
# rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
pspEnabled: true
dnsPolicy: "ClusterFirstWithHostNet"
procUptimeFile: /proc/uptime

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,5 @@
apiVersion: v1
appVersion: v1
description: A Helm chart for the AWS UI View
name: aws-ui-view
version: 1.0.0

View File

@@ -0,0 +1,47 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-ui-view.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-ui-view.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-ui-view.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-ui-view.labels" -}}
app.kubernetes.io/name: {{ include "aws-ui-view.name" . }}
helm.sh/chart: {{ include "aws-ui-view.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

View File

@@ -0,0 +1,35 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: {{ include "aws-ui-view.fullname" . }}
rules:
- apiGroups:
- '*'
resources:
- nodes
- namespaces
- pods
- events
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- statefulsets
- replicasets
verbs:
- get
- list
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "aws-ui-view.fullname" . }}
subjects:
- kind: Group
name: Admins
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: {{ include "aws-ui-view.fullname" . }}
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,3 @@
nameOverride: aws-ui-view
fullnameOverride: "aws-ui-view"

View File

@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: v1.7.5
appVersion: v1.10.2
description: A Helm chart for the AWS VPC CNI
home: https://github.com/aws/amazon-vpc-cni-k8s
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -15,4 +15,4 @@ maintainers:
name: aws-vpc-cni
sources:
- https://github.com/aws/amazon-vpc-cni-k8s
version: 1.1.3
version: 1.1.13

View File

@@ -54,6 +54,7 @@ The following table lists the configurable parameters for this chart and their d
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `podSecurityContext` | Pod Security Context | `{}` |
| `podAnnotations` | annotations to add to each pod | `{}` |
| `podLabels` | Labels to add to each pod | `{}` |
| `priorityClassName` | Name of the priorityClass | `system-node-critical` |
| `resources` | Resources for the pods | `requests.cpu: 10m` |
| `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` |
@@ -65,6 +66,7 @@ The following table lists the configurable parameters for this chart and their d
| `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` |
| `tolerations` | Optional deployment tolerations | `[]` |
| `updateStrategy` | Optional update strategy | `type: RollingUpdate` |
| `cri.hostPath` | Optional use alternative container runtime | `nil` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters:

View File

@@ -12,9 +12,12 @@ rules:
verbs: ["list", "watch", "get"]
- apiGroups: [""]
resources:
- pods
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: [""]
resources:
- pods
verbs: ["list", "watch", "get", "patch"]
- apiGroups: [""]
resources:
- nodes

View File

@@ -1,5 +1,5 @@
{{- if .Values.crd.create -}}
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
@@ -12,6 +12,10 @@ spec:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
names:
plural: eniconfigs
singular: eniconfig

View File

@@ -14,6 +14,9 @@ spec:
{{- else }}
app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 6 }}
{{- end }}
{{- end }}
template:
metadata:
@@ -27,6 +30,9 @@ spec:
app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
{{- end }}
spec:
priorityClassName: "{{ .Values.priorityClassName }}"
serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }}
@@ -63,8 +69,10 @@ spec:
name: metrics
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 12 }}
timeoutSeconds: {{ .Values.livenessProbeTimeoutSeconds }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 12 }}
timeoutSeconds: {{ .Values.readinessProbeTimeoutSeconds }}
env:
{{- range $key, $value := .Values.env }}
- name: {{ $key }}
@@ -92,8 +100,13 @@ spec:
{{- end }}
- mountPath: /host/var/log/aws-routed-eni
name: log-dir
{{- if .Values.cri.hostPath }}
- mountPath: /var/run/cri.sock
name: cri
{{- else }}
- mountPath: /var/run/dockershim.sock
name: dockershim
{{- end }}
- mountPath: /var/run/aws-node
name: run-dir
- mountPath: /run/xtables.lock
@@ -110,9 +123,15 @@ spec:
configMap:
name: {{ include "aws-vpc-cni.fullname" . }}
{{- end }}
{{- with .Values.cri.hostPath }}
- name: cri
hostPath:
{{- toYaml . | nindent 10 }}
{{- else }}
- name: dockershim
hostPath:
path: /var/run/dockershim.sock
{{- end }}
- name: log-dir
hostPath:
path: /var/log/aws-routed-eni

View File

@@ -0,0 +1,170 @@
# Test values for aws-vpc-cni.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This default name override is to maintain backwards compatibility with
# existing naming
nameOverride: aws-node
init:
image:
tag: v1.10.2
region: us-west-2
account: "602401143452"
pullPolicy: Always
domain: "amazonaws.com"
# Set to use custom image
# override: "repo/org/image:tag"
env:
DISABLE_TCP_EARLY_DEMUX: "false"
ENABLE_IPv6: "false"
securityContext:
privileged: true
image:
region: us-west-2
tag: v1.10.2
account: "602401143452"
domain: "amazonaws.com"
pullPolicy: Always
# Set to use custom image
# override: "repo/org/image:tag"
# The CNI supports a number of environment variable settings
# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables
env:
ADDITIONAL_ENI_TAGS: "{}"
AWS_VPC_CNI_NODE_PORT_SUPPORT: "true"
AWS_VPC_ENI_MTU: "9001"
AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false"
AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false"
AWS_VPC_K8S_CNI_EXTERNALSNAT: "false"
AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log"
AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG
AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng"
AWS_VPC_K8S_CNI_VETHPREFIX: eni
AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log"
AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG
DISABLE_INTROSPECTION: "false"
DISABLE_METRICS: "false"
ENABLE_POD_ENI: "false"
ENABLE_PREFIX_DELEGATION: "false"
WARM_ENI_TARGET: "1"
WARM_PREFIX_TARGET: "1"
DISABLE_NETWORK_RESOURCE_PROVISIONING: "false"
ENABLE_IPv4: "true"
ENABLE_IPv6: "false"
# this flag enables you to use the match label that was present in the original daemonset deployed by EKS
# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release
originalMatchLabels: false
cniConfig:
enabled: false
fileContents: ""
imagePullSecrets: []
fullnameOverride: "aws-node"
priorityClassName: system-node-critical
podSecurityContext: {}
podAnnotations: {}
podLabels: {}
securityContext:
capabilities:
add:
- "NET_ADMIN"
crd:
create: true
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
livenessProbe:
exec:
command:
- /app/grpc-health-probe
- '-addr=:50051'
- '-connect-timeout=5s'
- '-rpc-timeout=5s'
initialDelaySeconds: 60
livenessProbeTimeoutSeconds: 10
readinessProbe:
exec:
command:
- /app/grpc-health-probe
- '-addr=:50051'
- '-connect-timeout=5s'
- '-rpc-timeout=5s'
initialDelaySeconds: 1
readinessProbeTimeoutSeconds: 10
resources:
requests:
cpu: 10m
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: "10%"
nodeSelector: {}
tolerations: []
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "kubernetes.io/os"
operator: In
values:
- linux
- key: "kubernetes.io/arch"
operator: In
values:
- amd64
- arm64
- key: "eks.amazonaws.com/compute-type"
operator: NotIn
values:
- fargate
eniConfig:
# Specifies whether ENIConfigs should be created
create: false
region: us-west-2
subnets:
# Key identifies the AZ
# Value contains the subnet ID and security group IDs within that AZ
# a:
# id: subnet-123
# securityGroups:
# - sg-123
# b:
# id: subnet-456
# securityGroups:
# - sg-456
# c:
# id: subnet-789
# securityGroups:
# - sg-789
cri:
hostPath: # "/var/run/containerd/containerd.sock"

Some files were not shown because too many files have changed in this diff Show More