feat: adding pleco support with eks

Pierre Mavro
2021-01-25 22:27:11 +01:00
committed by Pierre Mavro
parent d014b1400b
commit 7d496be7c7
25 changed files with 257 additions and 874 deletions

View File

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,16 +0,0 @@
apiVersion: v1
appVersion: "0.5.0"
name: aws-ebs-csi-driver
description: A Helm chart for AWS EBS CSI Driver
version: 0.3.0
kubeVersion: ">=1.13.0-0"
home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-ebs-csi-driver
keywords:
- aws
- ebs
- csi
maintainers:
- name: leakingtapan
email: chengpan@amazon.com

View File

@@ -1 +0,0 @@
Original chart git@github.com:kubernetes-sigs/aws-ebs-csi-driver.git

View File

@@ -1,58 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-ebs-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-ebs-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-ebs-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-ebs-csi-driver.labels" -}}
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
helm.sh/chart: {{ include "aws-ebs-csi-driver.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Convert the `--extra-volume-tags` command line arg from a map.
*/}}
{{- define "aws-ebs-csi-driver.extra-volume-tags" -}}
{{- $result := dict "pairs" (list) -}}
{{- range $key, $value := .Values.extraVolumeTags -}}
{{- $noop := printf "%s=%s" $key $value | append $result.pairs | set $result "pairs" -}}
{{- end -}}
{{- if gt (len $result.pairs) 0 -}}
- --extra-volume-tags={{- join "," $result.pairs -}}
{{- end -}}
{{- end -}}
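For illustration only (not part of the chart): given a hypothetical extraVolumeTags map, the helper above renders a single joined controller argument. Go templates range over map keys in sorted order, so the pairs come out alphabetically. A sketch:

# hypothetical chart values
extraVolumeTags:
  env: staging
  team: backend

# expected rendered controller argument
# - --extra-volume-tags=env=staging,team=backend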

View File

@@ -1,7 +0,0 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false

View File

@@ -1,117 +0,0 @@
# Node Service
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.node.podAnnotations }}
annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }}
{{- end }}
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
{{- with .Values.node.tolerations }}
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: ebs-plugin
securityContext:
privileged: true
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
args:
- node
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: node-driver-registrar
image: {{ printf "%s:%s" .Values.sidecars.nodeDriverRegistrarImage.repository .Values.sidecars.nodeDriverRegistrarImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"]
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ printf "%s:%s" .Values.sidecars.livenessProbeImage.repository .Values.sidecars.livenessProbeImage.tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory

View File

@@ -1,151 +0,0 @@
# Controller Service
kind: Deployment
apiVersion: apps/v1
metadata:
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations: {{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
serviceAccountName: ebs-csi-controller-sa
priorityClassName: system-cluster-critical
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
- operator: Exists
{{- with .Values.tolerations }}
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: ebs-plugin
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- controller
- --endpoint=$(CSI_ENDPOINT)
{{ include "aws-ebs-csi-driver.extra-volume-tags" . }}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: key_id
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: access_key
optional: true
{{- if .Values.region }}
- name: AWS_REGION
value: {{ .Values.region }}
{{- end }}
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
{{- with .Values.resources }}
resources: {{ toYaml . | nindent 12 }}
{{- end }}
- name: csi-provisioner
image: {{ printf "%s:%s" .Values.sidecars.provisionerImage.repository .Values.sidecars.provisionerImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
{{- if .Values.enableVolumeScheduling }}
- --feature-gates=Topology=true
{{- end}}
- --enable-leader-election
- --leader-election-type=leases
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: {{ printf "%s:%s" .Values.sidecars.attacherImage.repository .Values.sidecars.attacherImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
- --leader-election=true
- --leader-election-type=leases
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- if .Values.enableVolumeSnapshot }}
- name: csi-snapshotter
image: {{ printf "%s:%s" .Values.sidecars.snapshotterImage.repository .Values.sidecars.snapshotterImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- end }}
{{- if .Values.enableVolumeResizing }}
- name: csi-resizer
image: {{ printf "%s:%s" .Values.sidecars.resizerImage.repository .Values.sidecars.resizerImage.tag }}
imagePullPolicy: Always
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- end }}
- name: liveness-probe
image: {{ printf "%s:%s" .Values.sidecars.livenessProbeImage.repository .Values.sidecars.livenessProbeImage.tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -1,233 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshot-controller-binding
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-snapshot-controller-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-leaderelection
namespace: kube-system
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: Role
name: snapshot-controller-leaderelection
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- if .Values.enableVolumeResizing }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{{- end}}

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
{{- with .Values.serviceAccount.controller.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-snapshot-controller
namespace: kube-system
{{- with .Values.serviceAccount.snapshot.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}

View File

@@ -1,26 +0,0 @@
{{- if .Values.enableVolumeSnapshot }}
#Snapshot controller
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: ebs-snapshot-controller
namespace: kube-system
spec:
serviceName: ebs-snapshot-controller
replicas: 1
selector:
matchLabels:
app: ebs-snapshot-controller
template:
metadata:
labels:
app: ebs-snapshot-controller
spec:
serviceAccount: ebs-snapshot-controller
containers:
- name: snapshot-controller
image: quay.io/k8scsi/snapshot-controller:v2.1.1
args:
- --v=5
- --leader-election=false
{{- end }}

View File

@@ -1,86 +0,0 @@
# Default values for aws-ebs-csi-driver.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2
image:
repository: amazon/aws-ebs-csi-driver
tag: "v0.5.0"
pullPolicy: IfNotPresent
sidecars:
provisionerImage:
repository: quay.io/k8scsi/csi-provisioner
tag: "v1.5.0"
attacherImage:
repository: quay.io/k8scsi/csi-attacher
tag: "v1.2.0"
snapshotterImage:
repository: quay.io/k8scsi/csi-snapshotter
tag: "v2.1.1"
livenessProbeImage:
repository: quay.io/k8scsi/livenessprobe
tag: "v1.1.0"
resizerImage:
repository: quay.io/k8scsi/csi-resizer
tag: "v0.3.0"
nodeDriverRegistrarImage:
repository: quay.io/k8scsi/csi-node-driver-registrar
tag: "v1.1.0"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
# True if enable volume scheduling for dynamic volume provisioning
enableVolumeScheduling: false
# True if enable volume resizing
enableVolumeResizing: false
# True if enable volume snapshot
enableVolumeSnapshot: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Extra volume tags to attach to each dynamically provisioned volume.
# ---
# extraVolumeTags:
# key1: value1
# key2: value2
extraVolumeTags: {}
# AWS region to use. If not specified then the region will be looked up via the AWS EC2 metadata
# service.
# ---
# region: us-east-1
region: ""
node:
podAnnotations: {}
tolerations: []
serviceAccount:
controller:
annotations: {}
snapshot:
annotations: {}

View File

@@ -0,0 +1,42 @@
locals {
additional_tags = {
}
}
locals {
tags_eks = {
ClusterId = var.kubernetes_cluster_id,
ClusterName = var.kubernetes_cluster_name,
Region = var.region
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds{% endif %}
}
}
resource "aws_cloudwatch_log_group" "eks_cloudwatch_log_group" {
name = var.eks_cloudwatch_log_group
retention_in_days = 7
tags = local.tags_eks
}
resource "aws_eks_cluster" "eks_cluster" {
name = "qovery-${var.kubernetes_cluster_id}"
role_arn = aws_iam_role.eks_cluster.arn
version = var.eks_k8s_versions.masters
enabled_cluster_log_types = ["api","audit","authenticator","controllerManager","scheduler"]
vpc_config {
security_group_ids = [aws_security_group.eks_cluster.id]
subnet_ids = flatten([aws_subnet.eks_zone_a.*.id, aws_subnet.eks_zone_b.*.id, aws_subnet.eks_zone_c.*.id])
}
tags = local.tags_eks
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.eks_cluster_AmazonEKSServicePolicy,
aws_cloudwatch_log_group.eks_cloudwatch_log_group,
]
}

View File

@@ -7,6 +7,7 @@ locals {
Name = "qovery-eks-workers",
"kubernetes.io/cluster/qovery-${var.kubernetes_cluster_id}" = "shared",
"kubernetes.io/role/elb" = 1,
{% if resource_expiration_in_seconds is defined %}ttl = var.resource_expiration_in_seconds,{% endif %}
}
)
}

View File

@@ -79,6 +79,18 @@ resource "helm_release" "cluster_autoscaler" {
value = "system-cluster-critical"
}
# cluster autoscaler options
set {
name = "extraArgs.balance-similar-node-groups"
value = "true"
}
# observability
set {
name = "serviceMonitor.enabled"

View File

@@ -13,6 +13,11 @@ resource "helm_release" "pleco" {
value = timestamp()
}
set {
name = "enabledFeatures.awsRegions"
value = "{eu-west-3,us-east-2}"
}
set {
name = "enabledFeatures.rds"
value = "true"
@@ -28,6 +33,11 @@ resource "helm_release" "pleco" {
value = "true"
}
set {
name = "enabledFeatures.eks"
value = "true"
}
set {
name = "enabledFeatures.disableDryRun"
value = "true"
@@ -43,11 +53,6 @@ resource "helm_release" "pleco" {
value = "{{ aws_secret_key }}"
}
set {
name = "environmentVariables.AWS_DEFAULT_REGION"
value = "{{ aws_region }}"
}
set {
name = "environmentVariables.LOG_LEVEL"
value = "debug"
@@ -57,5 +62,6 @@ resource "helm_release" "pleco" {
aws_eks_cluster.eks_cluster,
helm_release.aws_vpc_cni,
helm_release.cluster_autoscaler,
helm_release.prometheus_operator,
]
}
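A note on the awsRegions value above: the curly-brace string "{eu-west-3,us-east-2}" is Helm's set syntax for passing a list, so it should be roughly equivalent to supplying this values.yaml fragment (sketch for illustration):

enabledFeatures:
  awsRegions:
    - eu-west-3
    - us-east-2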

View File

@@ -12,6 +12,9 @@ resource "helm_release" "prometheus_operator" {
name = "prometheus-operator"
chart = "common/charts/prometheus-operator"
namespace = local.prometheus_namespace
// high timeout (in seconds) because on bootstrap it's one of the biggest dependencies, and on upgrade it can take time
// because of the CRDs and the number of elements it has to deploy
timeout = 600
create_namespace = true
atomic = true
max_history = 50

View File

@@ -261,3 +261,12 @@ variable "discord_api_key" {
default = "{{ discord_api_key }}"
type = string
}
{%- if resource_expiration_in_seconds is defined %}
# Pleco ttl
variable "resource_expiration_in_seconds" {
description = "Resource expiration in seconds"
default = {{ resource_expiration_in_seconds }}
type = number
}
{% endif %}

View File

@@ -1,9 +1,9 @@
apiVersion: v2
appVersion: 0.3.3
appVersion: 0.4.1
description: Automatically removes Cloud managed services and Kubernetes resources
based on tags with TTL
home: https://github.com/Qovery/pleco
icon: https://github.com/Qovery/pleco/raw/main/assets/pleco_logo.png
name: pleco
type: application
version: 0.3.3
version: 0.4.1

View File

@@ -46,6 +46,10 @@ spec:
{{ if .Values.enabledFeatures.kubernetes }}
- --kube-conn
- {{ .Values.enabledFeatures.kubernetes }}
{{ end }}
{{ if .Values.enabledFeatures.awsRegions }}
- --aws-regions
- "{{ join "," .Values.enabledFeatures.awsRegions }}"
{{ end }}
{{ if eq .Values.enabledFeatures.rds true}}
- --enable-rds
@@ -55,7 +59,16 @@ spec:
{{ end }}
{{ if eq .Values.enabledFeatures.documentdb true}}
- --enable-documentdb
{{ end }}
{{ end }}
{{ if eq .Values.enabledFeatures.eks true}}
- --enable-eks
{{ end }}
{{ if or (eq .Values.enabledFeatures.elb true) (eq .Values.enabledFeatures.eks true)}}
- --enable-elb
{{ end }}
{{ if or (eq .Values.enabledFeatures.ebs true) (eq .Values.enabledFeatures.eks true)}}
- --enable-ebs
{{ end }}
env:
{{ range $key, $value := .Values.environmentVariables -}}
- name: "{{ $key }}"

View File

@@ -3,13 +3,12 @@ replicaCount: 1
image:
repository: qoveryrd/pleco
pullPolicy: IfNotPresent
plecoImageTag: "v0.3.3"
plecoImageTag: "0.4.1"
environmentVariables:
LOG_LEVEL: "info"
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
# AWS_DEFAULT_REGION: ""
# KUBECONFIG: ""
enabledFeatures:
@@ -17,9 +16,16 @@ enabledFeatures:
checkInterval: 120
# Choose between in/out/off
kubernetes: "in"
# AWS
awsRegions: []
# - eu-west-3
# - us-east-2
rds: false
documentdb: false
elasticache: false
eks: false
elb: false
ebs: false
imagePullSecrets: []
nameOverride: ""
@@ -48,8 +54,8 @@ securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources:

View File

@@ -58,7 +58,7 @@ charts:
version: 12.0.1
dest: services
- name: pleco
version: 0.3.3
version: 0.4.1
repo_name: pleco
- name: do-k8s-token-rotate
version: 0.1.3

View File

@@ -12,10 +12,11 @@ use crate::cloud_provider::kubernetes::{Kind, Kubernetes, KubernetesNode};
use crate::cloud_provider::models::WorkerNodeDataTemplate;
use crate::cloud_provider::{kubernetes, CloudProvider};
use crate::cmd;
use crate::cmd::kubectl::kubectl_exec_get_all_namespaces;
use crate::cmd::kubectl::{kubectl_delete_objects_in_all_namespaces, kubectl_exec_get_all_namespaces};
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider;
use crate::dns_provider::DnsProvider;
use crate::error::EngineErrorCause::Internal;
use crate::error::{cast_simple_error_to_engine_error, EngineError, EngineErrorCause};
use crate::fs::workspace_directory;
use crate::models::{
@@ -24,6 +25,9 @@ use crate::models::{
use crate::object_storage::s3::S3;
use crate::object_storage::ObjectStorage;
use crate::string::terraform_list_format;
use retry::delay::Fibonacci;
use retry::Error::Operation;
use retry::OperationResult;
pub mod node;
@@ -214,6 +218,12 @@ impl<'a> EKS<'a> {
);
context.insert("test_cluster", &test_cluster);
if self.context.resource_expiration_in_seconds().is_some() {
context.insert(
"resource_expiration_in_seconds",
&self.context.resource_expiration_in_seconds(),
)
}
// DNS configuration
context.insert("managed_dns", &managed_dns_list);
@@ -399,7 +409,7 @@ impl<'a> Kubernetes for EKS<'a> {
},
ProgressLevel::Info,
Some(format!(
"start to create EKS cluster {} with id {}",
"Preparing EKS {} cluster deployment with id {}",
self.name(),
self.id()
)),
@@ -437,24 +447,57 @@ impl<'a> Kubernetes for EKS<'a> {
),
)?;
let _ = cast_simple_error_to_engine_error(
self.engine_error_scope(),
listeners_helper.deployment_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(format!(
"Deploying EKS {} cluster deployment with id {}",
self.name(),
self.id()
)),
self.context.execution_id(),
crate::cmd::terraform::terraform_exec_with_init_validate_plan_apply(
temp_dir.as_str(),
self.context.is_dry_run_deploy(),
),
)?;
));
Ok(())
let terraform_result =
retry::retry(
Fibonacci::from_millis(60000).take(3),
|| match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
crate::cmd::terraform::terraform_exec_with_init_validate_plan_apply(
temp_dir.as_str(),
self.context.is_dry_run_deploy(),
),
) {
Ok(_) => OperationResult::Ok(()),
Err(e) => OperationResult::Retry(e),
},
);
match terraform_result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(msg)) => Err(EngineError::new(
EngineErrorCause::Internal,
self.engine_error_scope(),
self.context().execution_id(),
Some(format!(
"rror while deploying cluster {} with id {}. {}",
self.name(),
self.id(),
msg
)),
)),
}
}
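For context on the retry policy introduced above: Fibonacci::from_millis(60000).take(3) yields three delays of roughly 60 s, 60 s and 120 s, so (assuming the retry crate's usual behaviour of one extra attempt per yielded delay) a failing terraform apply is tried up to four times with about four minutes of cumulative waiting before the error is surfaced:

attempt 1 -> wait ~60 s -> attempt 2 -> wait ~60 s -> attempt 3 -> wait ~120 s -> attempt 4 -> give up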
fn on_create_error(&self) -> Result<(), EngineError> {
warn!("EKS.on_create_error() called for {}", self.name());
// FIXME
Err(self.engine_error(
EngineErrorCause::Internal,
format!("{} Kubernetes cluster rollback not implemented", self.name()),
format!("{} Kubernetes cluster failed on deployment", self.name()),
))
}
@@ -487,9 +530,9 @@ impl<'a> Kubernetes for EKS<'a> {
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Warn,
ProgressLevel::Info,
Some(format!(
"start to delete EKS cluster {} with id {}",
"Preparing to delete EKS cluster {} with id {}",
self.name(),
self.id()
)),
@@ -537,6 +580,15 @@ impl<'a> Kubernetes for EKS<'a> {
// should apply before destroy to be sure destroy will compute on all resources
// don't exit on failure, it can happen if we resume a destroy process
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(format!("Ensuring everything is up to date before deleting",)),
self.context.execution_id(),
));
info!("Running Terraform apply");
match cast_simple_error_to_engine_error(
self.engine_error_scope(),
@@ -548,6 +600,17 @@ impl<'a> Kubernetes for EKS<'a> {
};
// should make the diff between all namespaces and qovery managed namespaces
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Warn,
Some(format!(
"Deleting all non-Qovery deployed applications and dependencies",
)),
self.context.execution_id(),
));
match all_namespaces {
Ok(namespace_vec) => {
let namespaces_as_str = namespace_vec.iter().map(std::ops::Deref::deref).collect();
@@ -581,6 +644,17 @@ impl<'a> Kubernetes for EKS<'a> {
),
}
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Warn,
Some(format!(
"Deleting all Qovery deployed elements and associated dependencies",
)),
self.context.execution_id(),
));
// https://cert-manager.io/docs/installation/uninstall/kubernetes/
// required to avoid namespace stuck on deletion
info!("Delete cert-manager related objects to prepare deletion");
@@ -653,13 +727,47 @@ impl<'a> Kubernetes for EKS<'a> {
}
info!("Running Terraform destroy");
let terraform_result = cast_simple_error_to_engine_error(
self.engine_error_scope(),
listeners_helper.delete_in_progress(ProgressInfo::new(
ProgressScope::Infrastructure {
execution_id: self.context.execution_id().to_string(),
},
ProgressLevel::Info,
Some(format!(
"Starting to delete EKS cluster {} with id {}",
self.name(),
self.id()
)),
self.context.execution_id(),
cmd::terraform::terraform_exec_with_init_plan_destroy(temp_dir.as_str()),
);
));
Ok(())
let terraform_result =
retry::retry(
Fibonacci::from_millis(60000).take(3),
|| match cast_simple_error_to_engine_error(
self.engine_error_scope(),
self.context.execution_id(),
cmd::terraform::terraform_exec_with_init_plan_destroy(temp_dir.as_str()),
) {
Ok(_) => OperationResult::Ok(()),
Err(e) => OperationResult::Retry(e),
},
);
match terraform_result {
Ok(_) => Ok(()),
Err(Operation { error, .. }) => Err(error),
Err(retry::Error::Internal(msg)) => Err(EngineError::new(
EngineErrorCause::Internal,
self.engine_error_scope(),
self.context().execution_id(),
Some(format!(
"Error while deleting cluster {} with id {}: {}",
self.name(),
self.id(),
msg
)),
)),
}
}
fn on_delete_error(&self) -> Result<(), EngineError> {

View File

@@ -48,8 +48,7 @@ where
chart_root_dir.as_ref().to_str().unwrap()
);
let helm_history_rows =
helm_exec_history(kubernetes_config.as_ref(), namespace, release_name, envs)?;
let helm_history_rows = helm_exec_history(kubernetes_config.as_ref(), namespace, release_name, envs)?;
// take the last deployment from helm history - or return none if there is no history
Ok(match helm_history_rows.first() {
@@ -312,8 +311,7 @@ where
chart_root_dir.as_ref().to_str().unwrap()
);
let helm_history_rows =
helm_exec_history(kubernetes_config.as_ref(), namespace, release_name, envs)?;
let helm_history_rows = helm_exec_history(kubernetes_config.as_ref(), namespace, release_name, envs)?;
// take the last deployment from helm history - or return none if there is no history
Ok(match helm_history_rows.first() {
@@ -322,10 +320,7 @@ where
})
}
pub fn helm_list<P>(
kubernetes_config: P,
envs: Vec<(&str, &str)>,
) -> Result<Vec<HelmList>, SimpleError>
pub fn helm_list<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<Vec<HelmList>, SimpleError>
where
P: AsRef<Path>,
{

View File

@@ -7,8 +7,8 @@ use serde::de::DeserializeOwned;
use crate::cloud_provider::digitalocean::models::svc::DOKubernetesList;
use crate::cmd::structs::{
Item, KubernetesEvent, KubernetesJob, KubernetesList, KubernetesNode, KubernetesPod,
KubernetesPodStatusPhase, KubernetesService, LabelsContent,
Item, KubernetesEvent, KubernetesJob, KubernetesList, KubernetesNode, KubernetesPod, KubernetesPodStatusPhase,
KubernetesService, LabelsContent,
};
use crate::cmd::utilities::exec_with_envs_and_output;
use crate::constants::KUBECONFIG;
@@ -107,10 +107,7 @@ where
Err(err) => {
error!("{:?}", err);
error!("{}", output_string.as_str());
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some(output_string),
));
return Err(SimpleError::new(SimpleErrorKind::Other, Some(output_string)));
}
}
}
@@ -127,16 +124,7 @@ where
{
match do_kubectl_exec_describe_service(kubernetes_config, namespace, selector, envs) {
Ok(result) => {
if result.items.is_empty()
|| result
.items
.first()
.unwrap()
.status
.load_balancer
.ingress
.is_empty()
{
if result.items.is_empty() || result.items.first().unwrap().status.load_balancer.ingress.is_empty() {
return Ok(None);
}
@@ -169,16 +157,7 @@ where
{
match do_kubectl_exec_describe_service(kubernetes_config, namespace, selector, envs) {
Ok(result) => {
if result.items.is_empty()
|| result
.items
.first()
.unwrap()
.status
.load_balancer
.ingress
.is_empty()
{
if result.items.is_empty() || result.items.first().unwrap().status.load_balancer.ingress.is_empty() {
return Ok(None);
}
@@ -215,16 +194,7 @@ where
envs,
)?;
if result.items.is_empty()
|| result
.items
.first()
.unwrap()
.status
.load_balancer
.ingress
.is_empty()
{
if result.items.is_empty() || result.items.first().unwrap().status.load_balancer.ingress.is_empty() {
return Ok(None);
}
@@ -299,15 +269,7 @@ where
{
let result = kubectl_exec_get_pod(kubernetes_config, namespace, selector, envs)?;
if result.items.is_empty()
|| result
.items
.first()
.unwrap()
.status
.container_statuses
.is_empty()
{
if result.items.is_empty() || result.items.first().unwrap().status.container_statuses.is_empty() {
return Ok(None);
}
@@ -387,11 +349,7 @@ where
Ok(Some(false))
}
pub fn kubectl_exec_is_namespace_present<P>(
kubernetes_config: P,
namespace: &str,
envs: Vec<(&str, &str)>,
) -> bool
pub fn kubectl_exec_is_namespace_present<P>(kubernetes_config: P, namespace: &str, envs: Vec<(&str, &str)>) -> bool
where
P: AsRef<Path>,
{
@@ -425,11 +383,7 @@ where
}
}
pub fn kubectl_exec_create_namespace_without_labels(
namespace: &str,
kube_config: &str,
envs: Vec<(&str, &str)>,
) {
pub fn kubectl_exec_create_namespace_without_labels(namespace: &str, kube_config: &str, envs: Vec<(&str, &str)>) {
let _ = kubectl_exec_create_namespace(kube_config, namespace, None, envs);
}
@@ -443,11 +397,7 @@ where
P: AsRef<Path>,
{
// don't create the namespace if it already exists, and don't return an error in this case
if !kubectl_exec_is_namespace_present(
kubernetes_config.as_ref(),
namespace.clone(),
envs.clone(),
) {
if !kubectl_exec_is_namespace_present(kubernetes_config.as_ref(), namespace.clone(), envs.clone()) {
// create namespace
let mut _envs = Vec::with_capacity(envs.len() + 1);
_envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap()));
@@ -494,11 +444,7 @@ where
));
};
if !kubectl_exec_is_namespace_present(
kubernetes_config.as_ref(),
namespace.clone(),
envs.clone(),
) {
if !kubectl_exec_is_namespace_present(kubernetes_config.as_ref(), namespace.clone(), envs.clone()) {
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some(format! {"Can't set labels on namespace {} because it doesn't exists", namespace}),
@@ -512,10 +458,7 @@ where
for label in labels.iter() {
labels_string.push(format! {"{}={}", label.name, label.value});
}
let labels_str = labels_string
.iter()
.map(|x| x.as_ref())
.collect::<Vec<&str>>();
let labels_str = labels_string.iter().map(|x| x.as_ref()).collect::<Vec<&str>>();
command_args.extend(labels_str);
let mut _envs = Vec::with_capacity(envs.len() + 1);
@@ -584,11 +527,8 @@ pub fn kubectl_exec_get_all_namespaces<P>(
where
P: AsRef<Path>,
{
let result = kubectl_exec::<P, KubernetesList<Item>>(
vec!["get", "namespaces", "-o", "json"],
kubernetes_config,
envs,
);
let result =
kubectl_exec::<P, KubernetesList<Item>>(vec!["get", "namespaces", "-o", "json"], kubernetes_config, envs);
let mut to_return: Vec<String> = Vec::new();
@@ -743,11 +683,7 @@ pub fn kubectl_exec_get_node<P>(
where
P: AsRef<Path>,
{
kubectl_exec::<P, KubernetesList<KubernetesNode>>(
vec!["get", "node", "-o", "json"],
kubernetes_config,
envs,
)
kubectl_exec::<P, KubernetesList<KubernetesNode>>(vec!["get", "node", "-o", "json"], kubernetes_config, envs)
}
pub fn kubectl_exec_get_pod<P>(
@@ -812,11 +748,7 @@ where
}
}
fn kubectl_exec<P, T>(
args: Vec<&str>,
kubernetes_config: P,
envs: Vec<(&str, &str)>,
) -> Result<T, SimpleError>
fn kubectl_exec<P, T>(args: Vec<&str>, kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<T, SimpleError>
where
P: AsRef<Path>,
T: DeserializeOwned,
@@ -846,10 +778,7 @@ where
Err(err) => {
error!("{:?}", err);
error!("{}", output_string.as_str());
return Err(SimpleError::new(
SimpleErrorKind::Other,
Some(output_string),
));
return Err(SimpleError::new(SimpleErrorKind::Other, Some(output_string)));
}
};

View File

@@ -71,9 +71,8 @@ fn create_and_destroy_eks_cluster(region: &str, test_name: &str) {
let mut read_buf = String::new();
file.read_to_string(&mut read_buf).unwrap();
let options_result = serde_json::from_str::<
qovery_engine::cloud_provider::aws::kubernetes::Options,
>(read_buf.as_str());
let options_result =
serde_json::from_str::<qovery_engine::cloud_provider::aws::kubernetes::Options>(read_buf.as_str());
let kubernetes = EKS::new(
context.clone(),
@@ -121,10 +120,7 @@ fn create_and_destroy_eks_cluster_in_eu_west_3() {
let region = "eu-west-3";
create_and_destroy_eks_cluster(
region.clone(),
&format!(
"create_and_destroy_eks_cluster_in_{}",
region.replace("-", "_")
),
&format!("create_and_destroy_eks_cluster_in_{}", region.replace("-", "_")),
);
}
@@ -134,8 +130,6 @@ fn create_and_destroy_eks_cluster_in_us_east_2() {
let region = "us-east-2";
create_and_destroy_eks_cluster(
region.clone(),
&format!(
"create_and_destroy_eks_cluster_in_{}",
region.replace("-", "_")
),
&format!("create_and_destroy_eks_cluster_in_{}", region.replace("-", "_")),
);
}