feat: add kubeconfig to s3 + firewall rules + clean + EC2 helm charts

This commit is contained in:
Pierre Mavro
2022-05-01 19:25:50 +02:00
parent e665d96cde
commit 153f054123
73 changed files with 613 additions and 3200 deletions

View File

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
crds/kustomization.yaml

View File

@@ -1,6 +0,0 @@
apiVersion: v1
appVersion: 3.13.4
description: A Helm chart for installing Calico on AWS
icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png
name: aws-calico
version: 0.3.1

View File

@@ -1,66 +0,0 @@
# Calico on AWS
This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html
## Prerequisites
- Kubernetes 1.11+ running on AWS
## Installing the Chart
First add the EKS repository to Helm:
```shell
helm repo add eks https://aws.github.io/eks-charts
```
Install the Calico CRDs:
```shell
kubectl apply -k github.com/aws/eks-charts/tree/master/stable/aws-calico/crds
```
To install the chart with the release name `aws-calico` and default configuration:
```shell
$ helm install --name aws-calico --namespace kube-system eks/aws-calico
```
To install into an EKS cluster where the CNI is already installed, you can run:
```shell
helm upgrade --install --recreate-pods --force aws-calico --namespace kube-system eks/aws-calico
```
If you receive an error similar to `Error: release aws-calico failed: <resource> "aws-calico" already exists`, simply rerun the above command.
## Configuration
The following table lists the configurable parameters for this chart and their default values.
| Parameter | Description | Default |
|----------------------------------------|---------------------------------------------------------|---------------------------------|
| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` |
| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` |
| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` |
| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` |
| `calico.node.image` | Calico Node Image | `quay.io/calico/node` |
| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` |
| `calico.node.logseverity` | Calico Node Log Severity | `Info` |
| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` |
| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` |
| `calico.tag`                            | Calico version                                          | `v3.13.4` |
| `fullnameOverride` | Override the fullname of the chart | `calico` |
| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` |
| `autoscaler.tag`                        | Cluster Proportional Autoscaler version                 | `1.7.1` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters:
```shell
$ helm install --name aws-calico --namespace kube-system eks/aws-calico --values values.yaml
```

View File

@@ -1,214 +0,0 @@
# Create all the CustomResourceDefinitions needed for
# Calico policy-only mode.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
versions:
- name: v1
served: true
storage: true
names:
kind: NetworkSet
plural: networksets
singular: networkset

View File

@@ -1,55 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-calico.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-calico.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-calico.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-calico.labels" -}}
helm.sh/chart: {{ include "aws-calico.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "aws-calico.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-calico.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,22 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
data:
ladder: |-
{
"coresToReplicas": [],
"nodesToReplicas":
[
[1, 1],
[10, 2],
[100, 3],
[250, 4],
[500, 5],
[1000, 6],
[1500, 7],
[2000, 8]
]
}

View File

@@ -1,142 +0,0 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: "{{ include "aws-calico.fullname" . }}-node"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
{{ include "aws-calico.labels" . | indent 4 }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
spec:
priorityClassName: system-node-critical
nodeSelector:
{{- toYaml .Values.calico.node.nodeSelector | nindent 8 }}
hostNetwork: true
serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node"
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: "{{ .Values.calico.node.image }}:{{ .Values.calico.tag }}"
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Use eni not cali for interface prefix
- name: FELIX_INTERFACEPREFIX
value: "eni"
# Enable felix info logging.
- name: FELIX_LOGSEVERITYSCREEN
value: "{{ .Values.calico.node.logseverity }}"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,ecs"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: FELIX_TYPHAK8SSERVICENAME
value: "calico-typha"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# This will make Felix honor AWS VPC CNI's mangle table
# rules.
- name: FELIX_IPTABLESMANGLEALLOWACTION
value: Return
# Disable IPV6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
- name: FELIX_LOGSEVERITYSYS
value: "none"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: NO_DEFAULT_POOLS
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# No IP address needed.
- name: IP
value: ""
- name: FELIX_HEALTHENABLED
value: "true"
{{- if .Values.calico.node.extraEnv }}
{{- toYaml .Values.calico.node.extraEnv | nindent 12 }}
{{- end }}
securityContext:
privileged: true
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
periodSeconds: 10
resources:
{{- toYaml .Values.calico.node.resources | nindent 12 }}
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
volumes:
# Used to ensure proper kmods are installed.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
tolerations:
# Make sure calico/node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists

View File

@@ -1,128 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
{{ include "aws-calico.labels" . | indent 4 }}
spec:
revisionHistoryLimit: 2
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
template:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
priorityClassName: system-cluster-critical
nodeSelector:
{{- toYaml .Values.calico.typha.nodeSelector | nindent 8 }}
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
{{- if .Values.calico.typha.tolerations }}
{{ toYaml .Values.calico.typha.tolerations | indent 10 }}
{{- end }}
hostNetwork: true
serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node"
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: "{{ .Values.calico.typha.image }}:{{ .Values.calico.tag }}"
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Use eni not cali for interface prefix
- name: FELIX_INTERFACEPREFIX
value: "eni"
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
- name: TYPHA_LOGSEVERITYSCREEN
value: "{{ .Values.calico.typha.logseverity }}"
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "true"
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "9093"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_MAXCONNECTIONSLOWERLIMIT
value: "1"
- name: TYPHA_HEALTHENABLED
value: "true"
# This will make Felix honor AWS VPC CNI's mangle table
# rules.
- name: FELIX_IPTABLESMANGLEALLOWACTION
value: Return
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
resources:
{{- toYaml .Values.calico.typha.resources | nindent 12 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler"
{{ include "aws-calico.labels" . | indent 4 }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler"
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler"
spec:
priorityClassName: system-cluster-critical
nodeSelector:
{{- toYaml .Values.calico.typha_autoscaler.nodeSelector | nindent 8 }}
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
{{- if .Values.calico.typha_autoscaler.tolerations }}
{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }}
{{- end }}
containers:
- image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}"
name: autoscaler
command:
- /cluster-proportional-autoscaler
- --namespace={{ .Release.Namespace }}
- --configmap={{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler
- --target=deployment/{{ include "aws-calico.fullname" . }}-typha
- --logtostderr=true
- --v=2
resources:
{{- toYaml .Values.calico.typha_autoscaler.resources | nindent 12 }}
serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha"

View File

@@ -1,13 +0,0 @@
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
{{ include "aws-calico.labels" . | indent 4 }}
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"

View File

@@ -1,211 +0,0 @@
{{- if .Values.podSecurityPolicy.create -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "aws-calico.fullname" . }}-node
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node
{{ include "aws-calico.labels" . | indent 4 }}
spec:
privileged: true
allowPrivilegeEscalation: true
requiredDropCapabilities:
- ALL
hostNetwork: true
hostIPC: false
hostPID: false
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
- 'hostPath'
allowedHostPaths:
- pathPrefix: "/lib/modules"
readOnly: false
- pathPrefix: "/var/run/calico"
readOnly: false
- pathPrefix: "/var/lib/calico"
readOnly: false
- pathPrefix: "/run/xtables.lock"
readOnly: false
runAsUser:
rule: 'RunAsAny'
runAsGroup:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "aws-calico.fullname" . }}-typha
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha
{{ include "aws-calico.labels" . | indent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
hostNetwork: true
hostPorts:
- max: 5473
min: 5473
hostIPC: false
hostPID: false
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler
{{ include "aws-calico.labels" . | indent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
hostNetwork: false
hostIPC: false
hostPID: false
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "aws-calico.fullname" . }}-node-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node
{{ include "aws-calico.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ include "aws-calico.fullname" . }}-node
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "aws-calico.fullname" . }}-typha-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha
{{ include "aws-calico.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ include "aws-calico.fullname" . }}-typha
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler
{{ include "aws-calico.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "aws-calico.fullname" . }}-node-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-node
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
kind: Role
name: {{ include "aws-calico.fullname" . }}-node-psp
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ include "aws-calico.serviceAccountName" . }}-node
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "aws-calico.fullname" . }}-typha-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
kind: Role
name: {{ include "aws-calico.fullname" . }}-typha-psp
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ include "aws-calico.serviceAccountName" . }}-node
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp
labels:
app.kubernetes.io/name: {{ include "aws-calico.fullname" . }}-typha-autoscaler
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
kind: Role
name: {{ include "aws-calico.fullname" . }}-typha-horizontal-autoscaler-psp
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ include "aws-calico.serviceAccountName" . }}-typha-cpha
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,214 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "{{ include "aws-calico.fullname" . }}-node"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
{{ include "aws-calico.labels" . | indent 4 }}
rules:
# The CNI plugin needs to get pods, nodes, namespaces, and configmaps.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "{{ include "aws-calico.fullname" . }}-node"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "{{ include "aws-calico.fullname" . }}-node"
subjects:
- kind: ServiceAccount
name: "{{ include "aws-calico.serviceAccountName" . }}-node"
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
subjects:
- kind: ServiceAccount
name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha"
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments/scale"]
verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
labels:
{{ include "aws-calico.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
subjects:
- kind: ServiceAccount
name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha"
namespace: "{{ .Release.Namespace }}"

View File

@@ -1,18 +0,0 @@
# Create the ServiceAccount and roles necessary for Calico.
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ include "aws-calico.serviceAccountName" . }}-node"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node"
{{ include "aws-calico.labels" . | indent 4 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ include "aws-calico.serviceAccountName" . }}-typha-cpha"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-cpha"
{{ include "aws-calico.labels" . | indent 4 }}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "{{ include "aws-calico.fullname" . }}-typha"
labels:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"
{{ include "aws-calico.labels" . | indent 4 }}
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: "{{ include "aws-calico.fullname" . }}-typha"
selector:
app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha"

View File

@@ -1,54 +0,0 @@
fullnameOverride: calico
serviceAccount:
create: true
podSecurityPolicy:
create: false
calico:
tag: v3.13.4
typha:
logseverity: Info #Debug, Info, Warning, Error, Fatal
image: quay.io/calico/typha
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "96Mi"
cpu: "100m"
tolerations: []
nodeSelector:
beta.kubernetes.io/os: linux
node:
logseverity: Info #Debug, Info, Warning, Error, Fatal
image: quay.io/calico/node
resources:
requests:
memory: "32Mi"
cpu: "20m"
limits:
memory: "64Mi"
cpu: "100m"
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
nodeSelector:
beta.kubernetes.io/os: linux
typha_autoscaler:
resources:
requests:
memory: "16Mi"
cpu: "10m"
limits:
memory: "32Mi"
cpu: "10m"
tolerations: []
nodeSelector:
beta.kubernetes.io/os: linux
autoscaler:
tag: "1.7.1"
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,6 +0,0 @@
apiVersion: v2
name: aws-limits-exporter
description: A Helm chart that deploys the AWS service-limits exporter on Kubernetes
type: application
version: 0.1.0
appVersion: 0.3.0

View File

@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-limits-exporter.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-limits-exporter.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-limits-exporter.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "aws-limits-exporter.labels" -}}
helm.sh/chart: {{ include "aws-limits-exporter.chart" . }}
{{ include "aws-limits-exporter.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "aws-limits-exporter.selectorLabels" -}}
app.kubernetes.io/name: {{ include "aws-limits-exporter.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "aws-limits-exporter.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "aws-limits-exporter.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,67 +0,0 @@
# Deployment for the aws-limits-exporter pod(s).
# Exposes container port 8080 as "http-metrics"; AWS credentials are read
# from the chart-managed "aws-limits-exporter" Secret (templates/secret.yaml).
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "aws-limits-exporter.fullname" . }}
labels:
{{- include "aws-limits-exporter.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "aws-limits-exporter.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "aws-limits-exporter.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "aws-limits-exporter.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http-metrics
containerPort: 8080
protocol: TCP
# Credentials are injected from the Secret rendered by this chart; both
# keys must be provided via .Values.awsCredentials.
env:
- name: AWS_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-limits-exporter
key: awsAccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-limits-exporter
key: awsSecretKey
# TCP-only liveness check on the metrics port.
livenessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 5
periodSeconds: 20
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,8 +0,0 @@
# Opaque Secret holding the AWS credentials consumed by the exporter
# Deployment via secretKeyRef. Values come from .Values.awsCredentials and
# are base64-encoded here, as required by a Secret's "data" field.
# NOTE(review): the name is hard-coded ("aws-limits-exporter") rather than
# derived from the fullname helper; the Deployment references the same
# hard-coded name, so the two must stay in sync.
apiVersion: v1
kind: Secret
metadata:
name: aws-limits-exporter
type: Opaque
data:
awsAccessKey: {{ .Values.awsCredentials.awsAccessKey | b64enc}}
awsSecretKey: {{ .Values.awsCredentials.awsSecretKey | b64enc}}

View File

@@ -1,16 +0,0 @@
# Service exposing the exporter's "http-metrics" port.
# The extra plain "app: aws-limits-exporter" label is what the
# ServiceMonitor's selector matches (templates/servicemonitor.yaml).
apiVersion: v1
kind: Service
metadata:
name: {{ include "aws-limits-exporter.fullname" . }}
labels:
app: aws-limits-exporter
{{- include "aws-limits-exporter.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http-metrics
protocol: TCP
name: http-metrics
selector:
{{- include "aws-limits-exporter.selectorLabels" . | nindent 4 }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
{{/*
ServiceAccount for the exporter; rendered only when
.Values.serviceAccount.create is true. Annotations come from
.Values.serviceAccount.annotations.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "aws-limits-exporter.serviceAccountName" . }}
labels:
{{- include "aws-limits-exporter.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,19 +0,0 @@
{{- if .Values.prometheusScraping.enabled }}
# Prometheus Operator ServiceMonitor that scrapes the exporter's
# "http-metrics" port at /metrics. It selects the Service via its plain
# "app: aws-limits-exporter" label (set in templates/service.yaml).
# NOTE(review): the values key is spelled "scrapInterval" (no "e"); keep
# this template and values.yaml in sync if it is ever renamed.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "aws-limits-exporter.fullname" . }}
labels:
{{- include "aws-limits-exporter.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app: aws-limits-exporter
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: http-metrics
path: /metrics
interval: "{{ .Values.prometheusScraping.scrapInterval }}"
{{- end }}

View File

@@ -1,65 +0,0 @@
# Default values for the aws-limits-exporter chart.
replicaCount: 1
image:
repository: danielfm/aws-limits-exporter
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart version.
tag: "0.3.0"
imagePullSecrets: []
nameOverride: "aws-limits-exporter"
fullnameOverride: "aws-limits-exporter"
# AWS credentials rendered (base64-encoded) into the chart's Secret and
# injected into the exporter container as environment variables.
awsCredentials:
awsAccessKey: ""
awsSecretKey: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
labels:
app.kubernetes.io/name: aws-limits-exporter
selectorLabels:
app.kubernetes.io/name: aws-limits-exporter
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
prometheusScraping:
enabled: true
# NOTE: key is matched by templates/servicemonitor.yaml and is spelled
# "scrapInterval" (no "e").
scrapInterval: "60s"

View File

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,27 +0,0 @@
apiVersion: v1
appVersion: 1.5.0
description: A Helm chart for the AWS Node Termination Handler
home: https://github.com/aws/eks-charts
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
keywords:
- eks
- ec2
- node-termination
- spot
maintainers:
- email: nckturner@users.noreply.github.com
name: Nicholas Turner
url: https://github.com/nckturner
- email: stefanprodan@users.noreply.github.com
name: Stefan Prodan
url: https://github.com/stefanprodan
- email: jillmon@users.noreply.github.com
name: Jillian Montalvo
url: https://github.com/jillmon
- email: mattrandallbecker@users.noreply.github.com
name: Matthew Becker
url: https://github.com/mattrandallbecker
name: aws-node-termination-handler
sources:
- https://github.com/aws/eks-charts
version: 0.8.0

View File

@@ -1,96 +0,0 @@
# AWS Node Termination Handler
AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler.
## Prerequisites
* Kubernetes >= 1.11
## Installing the Chart
Add the EKS repository to Helm:
```sh
helm repo add eks https://aws.github.io/eks-charts
```
Install AWS Node Termination Handler:
To install the chart with the release name aws-node-termination-handler and default configuration:
```sh
helm install --name aws-node-termination-handler \
--namespace kube-system eks/aws-node-termination-handler
```
To install into an EKS cluster where the Node Termination Handler is already installed, you can run:
```sh
helm upgrade --install --recreate-pods --force \
aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler
```
If you receive an error similar to `Error: release aws-node-termination-handler
failed: <resource> "aws-node-termination-handler" already exists`, simply rerun
the above command.
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `aws-node-termination-handler` deployment:
```sh
helm delete --purge aws-node-termination-handler
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the chart and their default values.
Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `amazon/aws-node-termination-handler`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`image.pullSecrets` | image pull secrets (for private docker registries) | `[]`
`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false`
`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30`
`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30`
`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120`
`ignoreDaemonSets` | Causes kubectl to skip daemon set managed pods | `true`
`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80`
`webhookURL` | Posts event data to URL upon instance interruption action | ``
`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | ``
`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}`
`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}`
`dryRun` | If true, only log if a node would be drained | `false`
`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false`
`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true`
`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3`
`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. | `false`
`taintNode` | If true, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false`
`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false`
`affinity` | node/pod affinities | None
`podAnnotations` | annotations to add to each pod | `{}`
`priorityClassName` | Name of the priorityClass | `system-node-critical`
`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi`
`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet`
`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. | `{}`
`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`serviceAccount.create` | If `true`, create a new service account | `true`
`serviceAccount.name` | Service account to be used | None
`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}`
`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime`
`securityContext.runAsUserID` | User ID to run the container | `1000`
`securityContext.runAsGroupID` | Group ID to run the container | `1000`
`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os`
`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch`
`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false`
`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. | `9092`
## Metrics endpoint consideration
If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace
since it's listening on all interfaces.
Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.

View File

@@ -1,57 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-node-termination-handler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-node-termination-handler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels applied to every resource in this chart.
*/}}
{{- define "aws-node-termination-handler.labels" -}}
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node-termination-handler
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" (SemVer build metadata) is replaced with "_" because "+" is not a
valid character in a Kubernetes label value.
*/}}
{{- define "aws-node-termination-handler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use.
Defaults to the chart fullname when serviceAccount.create is true,
otherwise falls back to the "default" account.
*/}}
{{- define "aws-node-termination-handler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-node-termination-handler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,37 +0,0 @@
# ClusterRole for the node-termination-handler: get/patch/update nodes,
# list pods, create pod evictions, and get daemonsets (via both the legacy
# "extensions" API group and "apps").
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- extensions
resources:
- daemonsets
verbs:
- get
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get

View File

@@ -1,12 +0,0 @@
# Binds the same-named ClusterRole to the chart's ServiceAccount in the
# release namespace.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "aws-node-termination-handler.fullname" . }}
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,141 +0,0 @@
# DaemonSet running the node-termination-handler on every linux
# amd64/arm/arm64 node. Uses hostNetwork, runs as a non-root user, and
# mounts the host's uptime file read-only (path set by
# .Values.procUptimeFile).
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "aws-node-termination-handler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
spec:
updateStrategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node-termination-handler
spec:
volumes:
- name: "uptime"
hostPath:
path: "{{ .Values.procUptimeFile }}"
priorityClassName: "{{ .Values.priorityClassName }}"
affinity:
nodeAffinity:
# NOTE(jaypipes): Change when we complete
# https://github.com/aws/aws-node-termination-handler/issues/8
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }}
operator: In
values:
- linux
- key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }}
operator: In
values:
- amd64
- arm
- arm64
serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }}
hostNetwork: true
dnsPolicy: {{ .Values.dnsPolicy }}
containers:
- name: {{ include "aws-node-termination-handler.name" . }}
image: {{ .Values.image.repository}}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{ .Values.securityContext.runAsUserID }}
runAsGroup: {{ .Values.securityContext.runAsGroupID }}
allowPrivilegeEscalation: false
volumeMounts:
- name: "uptime"
mountPath: "/proc/uptime"
readOnly: true
# All handler configuration is passed as environment variables, one per
# values key; empty values fall back to the handler's own defaults.
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SPOT_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DELETE_LOCAL_DATA
value: {{ .Values.deleteLocalData | quote }}
- name: IGNORE_DAEMON_SETS
value: {{ .Values.ignoreDaemonSets | quote }}
- name: GRACE_PERIOD
value: {{ .Values.gracePeriod | quote }}
- name: POD_TERMINATION_GRACE_PERIOD
value: {{ .Values.podTerminationGracePeriod | quote }}
- name: INSTANCE_METADATA_URL
value: {{ .Values.instanceMetadataURL | quote }}
- name: NODE_TERMINATION_GRACE_PERIOD
value: {{ .Values.nodeTerminationGracePeriod | quote }}
- name: WEBHOOK_URL
value: {{ .Values.webhookURL | quote }}
- name: WEBHOOK_HEADERS
value: {{ .Values.webhookHeaders | quote }}
- name: WEBHOOK_TEMPLATE
value: {{ .Values.webhookTemplate | quote }}
- name: DRY_RUN
value: {{ .Values.dryRun | quote }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
value: {{ .Values.enableSpotInterruptionDraining | quote }}
- name: ENABLE_SCHEDULED_EVENT_DRAINING
value: {{ .Values.enableScheduledEventDraining | quote }}
- name: METADATA_TRIES
value: {{ .Values.metadataTries | quote }}
- name: CORDON_ONLY
value: {{ .Values.cordonOnly | quote }}
- name: TAINT_NODE
value: {{ .Values.taintNode | quote }}
- name: JSON_LOGGING
value: {{ .Values.jsonLogging | quote }}
- name: WEBHOOK_PROXY
value: {{ .Values.webhookProxy | quote }}
- name: ENABLE_PROMETHEUS_SERVER
value: {{ .Values.enablePrometheusServer | quote }}
- name: PROMETHEUS_SERVER_PORT
value: {{ .Values.prometheusServerPort | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,57 +0,0 @@
{{- if .Values.rbac.pspEnabled }}
# PodSecurityPolicy plus the ClusterRole and RoleBinding that let the
# handler's ServiceAccount "use" it. Rendered only when
# .Values.rbac.pspEnabled is true.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: false
hostIPC: false
hostNetwork: true
hostPID: false
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
allowedCapabilities:
- '*'
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "aws-node-termination-handler.fullname" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "aws-node-termination-handler.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
{{/*
ServiceAccount for the node-termination-handler; rendered only when
.Values.serviceAccount.create is true. Annotations (e.g. an
eks.amazonaws.com/role-arn for IRSA, per values.yaml) come from
.Values.serviceAccount.annotations.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "aws-node-termination-handler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
{{ include "aws-node-termination-handler.labels" . | indent 4 }}
{{- end -}}

View File

@@ -1,102 +0,0 @@
# Default values for aws-node-termination-handler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: amazon/aws-node-termination-handler
tag: v1.5.0
pullPolicy: IfNotPresent
pullSecrets: []
# UID/GID the handler container runs as (applied in the DaemonSet's
# container securityContext).
securityContext:
runAsUserID: 1000
runAsGroupID: 1000
nameOverride: ""
fullnameOverride: ""
priorityClassName: system-node-critical
podAnnotations: {}
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "100m"
## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is received
enableSpotInterruptionDraining: ""
## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event
enableScheduledEventDraining: ""
taintNode: false
## dryRun tells node-termination-handler to only log calls to kubernetes control plane
dryRun: false
# deleteLocalData tells kubectl to continue even if there are pods using
# emptyDir (local data that will be deleted when the node is drained).
deleteLocalData: ""
# ignoreDaemonSets causes kubectl to skip Daemon Set managed pods.
ignoreDaemonSets: ""
# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully.
# If negative, the default value specified in the pod will be used.
gracePeriod: ""
podTerminationGracePeriod: ""
# nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event.
nodeTerminationGracePeriod: ""
# webhookURL if specified, posts event data to URL upon instance interruption action.
webhookURL: ""
# webhookProxy if specified, uses this HTTP(S) proxy configuration.
webhookProxy: ""
# webhookHeaders if specified, replaces the default webhook headers.
webhookHeaders: ""
# webhookTemplate if specified, replaces the default webhook message template.
webhookTemplate: ""
# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80)
instanceMetadataURL: ""
# (TESTING USE): Mount path for uptime file
procUptimeFile: "/proc/uptime"
# nodeSelector tells the daemonset where to place the node-termination-handler
# pods. By default, this value is empty and every node will receive a pod.
nodeSelector: {}
nodeSelectorTermsOs: ""
nodeSelectorTermsArch: ""
enablePrometheusServer: false
prometheusServerPort: "9092"
tolerations:
- operator: "Exists"
affinity: {}
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use. If name is not set and create is true,
# a name is generated using the fullname template
name:
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
rbac:
# rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
pspEnabled: true
dnsPolicy: "ClusterFirstWithHostNet"

View File

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v1
description: A Helm chart for the AWS UI View
name: aws-ui-view
version: 1.0.0

View File

@@ -1,47 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-ui-view.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-ui-view.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" (SemVer build metadata) is replaced with "_" because "+" is not a
valid character in a Kubernetes label value.
*/}}
{{- define "aws-ui-view.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels applied to every resource in this chart.
NOTE(review): "k8s-app: aws-node" matches the aws-vpc-cni chart's label,
not this chart's name — confirm it is intentional for aws-ui-view.
*/}}
{{- define "aws-ui-view.labels" -}}
app.kubernetes.io/name: {{ include "aws-ui-view.name" . }}
helm.sh/chart: {{ include "aws-ui-view.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

View File

@@ -1,35 +0,0 @@
# Read-only ClusterRole for the aws-ui-view chart: get/list/watch core
# resources (nodes, namespaces, pods, events) and get/list workloads
# (apps/* and batch jobs).
# NOTE(review): the first rule uses apiGroups '*' rather than the core
# group "" — this matches those resource names in every API group, which
# is broader than the core group alone; confirm this is intended.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: {{ include "aws-ui-view.fullname" . }}
rules:
- apiGroups:
- '*'
resources:
- nodes
- namespaces
- pods
- events
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- statefulsets
- replicasets
verbs:
- get
- list
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list

View File

@@ -1,12 +0,0 @@
# Grants the read-only ClusterRole to the "Admins" RBAC group (a group
# subject, not a ServiceAccount).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "aws-ui-view.fullname" . }}
subjects:
- kind: Group
name: Admins
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: {{ include "aws-ui-view.fullname" . }}
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,3 +0,0 @@
nameOverride: aws-ui-view
fullnameOverride: "aws-ui-view"

View File

@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,18 +0,0 @@
apiVersion: v1
appVersion: v1.7.5
description: A Helm chart for the AWS VPC CNI
home: https://github.com/aws/amazon-vpc-cni-k8s
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
keywords:
- eks
- cni
- networking
- vpc
maintainers:
- email: jayanthvn@users.noreply.github.com
name: Jayanth Varavani
url: https://github.com/jayanthvn
name: aws-vpc-cni
sources:
- https://github.com/aws/amazon-vpc-cni-k8s
version: 1.1.3

View File

@@ -1,94 +0,0 @@
# AWS VPC CNI
This chart installs the AWS CNI Daemonset: https://github.com/aws/amazon-vpc-cni-k8s
## Prerequisites
- Kubernetes 1.11+ running on AWS
## Installing the Chart
First add the EKS repository to Helm:
```shell
helm repo add eks https://aws.github.io/eks-charts
```
To install the chart with the release name `aws-vpc-cni` and default configuration:
```shell
$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni
```
To install into an EKS cluster where the CNI is already installed, see [this section below](#adopting-the-existing-aws-node-resources-in-an-eks-cluster)
## Configuration
The following table lists the configurable parameters for this chart and their default values.
| Parameter | Description | Default |
| ------------------------|---------------------------------------------------------|-------------------------------------|
| `affinity` | Map of node/pod affinities | `{}` |
| `cniConfig.enabled` | Enable overriding the default 10-aws.conflist file | `false` |
| `cniConfig.fileContents`| The contents of the custom cni config file | `nil` |
| `eniConfig.create` | Specifies whether to create ENIConfig resource(s) | `false` |
| `eniConfig.region` | Region to use when generating ENIConfig resource names | `us-west-2` |
| `eniConfig.subnets` | A map of AZ identifiers to config per AZ | `nil` |
| `eniConfig.subnets.id` | The ID of the subnet within the AZ which will be used in the ENIConfig | `nil` |
| `eniConfig.subnets.securityGroups` | The IDs of the security groups which will be used in the ENIConfig | `nil` |
| `env` | List of environment variables. See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) |
| `fullnameOverride` | Override the fullname of the chart | `aws-node` |
| `image.region` | ECR repository region to use. Should match your cluster | `us-west-2` |
| `image.tag` | Image tag | `v1.7.5` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.override` | A custom docker image to use | `nil` |
| `imagePullSecrets` | Docker registry pull secret | `[]` |
| `init.image.region` | ECR repository region to use. Should match your cluster | `us-west-2` |
| `init.image.tag` | Image tag | `v1.7.5` |
| `init.image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `init.image.override` | A custom docker image to use | `nil` |
| `init.env` | List of init container environment variables. See [here](https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables) for options | (see `values.yaml`) |
| `init.securityContext` | Init container Security context | `privileged: true` |
| `originalMatchLabels` | Use the original daemonset matchLabels | `false` |
| `nameOverride` | Override the name of the chart | `aws-node` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `podSecurityContext` | Pod Security Context | `{}` |
| `podAnnotations` | annotations to add to each pod | `{}` |
| `priorityClassName` | Name of the priorityClass | `system-node-critical` |
| `resources` | Resources for the pods | `requests.cpu: 10m` |
| `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` |
| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` |
| `livenessProbe` | Liveness probe settings for daemonset | (see `values.yaml`) |
| `readinessProbe` | Readiness probe settings for daemonset | (see `values.yaml`) |
| `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` |
| `tolerations` | Optional deployment tolerations | `[]` |
| `updateStrategy` | Optional update strategy | `type: RollingUpdate` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters:
```shell
$ helm install --name aws-vpc-cni --namespace kube-system eks/aws-vpc-cni --values values.yaml
```
## Adopting the existing aws-node resources in an EKS cluster
If you do not want to delete the existing aws-node resources in your cluster that run the aws-vpc-cni and then install this helm chart, you can adopt the resources into a release instead. This process is highlighted in this [PR comment](https://github.com/aws/eks-charts/issues/57#issuecomment-628403245). Once you have annotated and labeled all the resources this chart specifies, enable the `originalMatchLabels` flag, and also set `crd.create` to false on the helm release and run an update. If you have been careful this should not diff and leave all the resources unmodified and now under management of helm.
Here is an example script to modify the existing resources:
WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release.
```
#!/usr/bin/env bash
set -euo pipefail
# don't import the CRD. Helm can't manage the lifecycle of it anyway.
for kind in daemonSet clusterRole clusterRoleBinding serviceAccount; do
echo "setting annotations and labels on $kind/aws-node"
kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE
kubectl -n kube-system annotate --overwrite $kind aws-node meta.helm.sh/release-namespace=kube-system
kubectl -n kube-system label --overwrite $kind aws-node app.kubernetes.io/managed-by=Helm
done
```

View File

@@ -1,57 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-vpc-cni.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-vpc-cni.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-vpc-cni.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-vpc-cni.labels" -}}
app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }}
helm.sh/chart: {{ include "aws-vpc-cni.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "aws-vpc-cni.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-vpc-cni.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,25 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "aws-vpc-cni.fullname" . }}
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
rules:
- apiGroups:
- crd.k8s.amazonaws.com
resources:
- eniconfigs
verbs: ["list", "watch", "get"]
- apiGroups: [""]
resources:
- pods
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: [""]
resources:
- nodes
verbs: ["list", "watch", "get", "update"]
- apiGroups: ["extensions"]
resources:
- '*'
verbs: ["list", "watch"]

View File

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "aws-vpc-cni.fullname" . }}
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "aws-vpc-cni.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "aws-vpc-cni.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,10 +0,0 @@
{{- if .Values.cniConfig.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "aws-vpc-cni.fullname" . }}
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
data:
10-aws.conflist: {{ .Values.cniConfig.fileContents | b64enc }}
{{- end -}}

View File

@@ -1,19 +0,0 @@
{{- if .Values.crd.create -}}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
spec:
scope: Cluster
group: crd.k8s.amazonaws.com
versions:
- name: v1alpha1
served: true
storage: true
names:
plural: eniconfigs
singular: eniconfig
kind: ENIConfig
{{- end -}}

View File

@@ -1,138 +0,0 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ include "aws-vpc-cni.fullname" . }}
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
spec:
updateStrategy:
{{ toYaml .Values.updateStrategy | indent 4 }}
selector:
matchLabels:
{{- if .Values.originalMatchLabels }}
k8s-app: aws-node
{{- else }}
app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
template:
metadata:
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
k8s-app: aws-node
spec:
priorityClassName: "{{ .Values.priorityClassName }}"
serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }}
hostNetwork: true
initContainers:
- name: aws-vpc-cni-init
image: "{{- if .Values.init.image.override }}{{- .Values.init.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.init.image.region }}.amazonaws.com/amazon-k8s-cni-init:{{- .Values.init.image.tag }}{{- end}}"
imagePullPolicy: {{ .Values.init.image.pullPolicy }}
env:
{{- range $key, $value := .Values.init.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
securityContext:
{{- toYaml .Values.init.securityContext | nindent 12 }}
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
terminationGracePeriodSeconds: 10
tolerations:
- operator: Exists
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: aws-node
image: "{{- if .Values.image.override }}{{- .Values.image.override }}{{- else }}602401143452.dkr.ecr.{{- .Values.image.region }}.amazonaws.com/amazon-k8s-cni:{{- .Values.image.tag }}{{- end}}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 61678
name: metrics
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 12 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 12 }}
env:
{{- range $key, $value := .Values.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
{{- if .Values.cniConfig.enabled }}
# the Dockerfile copies the baked-in config to this location; let's overwrite it with ours
# the entrypoint.sh script will then copy our config to /host/etc/cni/net.d on boot
- name: cni-config
mountPath: /app/10-aws.conflist
subPath: 10-aws.conflist
{{- end }}
- mountPath: /host/var/log/aws-routed-eni
name: log-dir
- mountPath: /var/run/dockershim.sock
name: dockershim
- mountPath: /var/run/aws-node
name: run-dir
- mountPath: /run/xtables.lock
name: xtables-lock
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
{{- if .Values.cniConfig.enabled }}
- name: cni-config
configMap:
name: {{ include "aws-vpc-cni.fullname" . }}
{{- end }}
- name: dockershim
hostPath:
path: /var/run/dockershim.sock
- name: log-dir
hostPath:
path: /var/log/aws-routed-eni
type: DirectoryOrCreate
- name: run-dir
hostPath:
path: /var/run/aws-node
type: DirectoryOrCreate
- name: xtables-lock
hostPath:
path: /run/xtables.lock
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,17 +0,0 @@
{{- if .Values.eniConfig.create }}
{{- range $key, $value := (required ".Values.eniConfig.subnets must be specified" .Values.eniConfig.subnets) }}
apiVersion: crd.k8s.amazonaws.com/v1alpha1
kind: ENIConfig
metadata:
name: {{ required ".Values.eniConfig.region must be specified" $.Values.eniConfig.region }}{{ $key }}
spec:
{{- if $value.securityGroups }}
securityGroups:
{{- range $sg := $value.securityGroups }}
- {{ $sg }}
{{- end }}
{{- end }}
subnet: {{ $value.id }}
---
{{- end }}
{{- end }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "aws-vpc-cni.serviceAccountName" . }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
{{ include "aws-vpc-cni.labels" . | indent 4 }}
{{- end -}}

View File

@@ -1,161 +0,0 @@
# Default values for aws-vpc-cni.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This default name override is to maintain backwards compatibility with
# existing naming
nameOverride: aws-node
init:
image:
tag: v1.7.5
region: us-west-2
pullPolicy: Always
# Set to use custom image
# override: "repo/org/image:tag"
env:
DISABLE_TCP_EARLY_DEMUX: "false"
securityContext:
privileged: true
image:
region: us-west-2
tag: v1.7.5
pullPolicy: Always
# Set to use custom image
# override: "repo/org/image:tag"
# The CNI supports a number of environment variable settings
# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables
env:
ADDITIONAL_ENI_TAGS: "{}"
AWS_VPC_CNI_NODE_PORT_SUPPORT: "true"
AWS_VPC_ENI_MTU: "9001"
AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false"
AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false"
AWS_VPC_K8S_CNI_EXTERNALSNAT: "false"
AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log"
AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG
AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng"
AWS_VPC_K8S_CNI_VETHPREFIX: eni
AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log"
AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG
DISABLE_INTROSPECTION: "false"
DISABLE_METRICS: "false"
ENABLE_POD_ENI: "false"
WARM_ENI_TARGET: "1"
# this flag enables you to use the match label that was present in the original daemonset deployed by EKS
# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release
originalMatchLabels: false
cniConfig:
enabled: false
fileContents: ""
imagePullSecrets: []
fullnameOverride: "aws-node"
priorityClassName: system-node-critical
podSecurityContext: {}
podAnnotations: {}
securityContext:
capabilities:
add:
- "NET_ADMIN"
crd:
create: true
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
livenessProbe:
exec:
command:
- /app/grpc-health-probe
- '-addr=:50051'
initialDelaySeconds: 60
readinessProbe:
exec:
command:
- /app/grpc-health-probe
- '-addr=:50051'
initialDelaySeconds: 1
resources:
requests:
cpu: 10m
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: "10%"
nodeSelector: {}
tolerations: []
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "beta.kubernetes.io/os"
operator: In
values:
- linux
- key: "beta.kubernetes.io/arch"
operator: In
values:
- amd64
- arm64
- key: "eks.amazonaws.com/compute-type"
operator: NotIn
values:
- fargate
- matchExpressions:
- key: "kubernetes.io/os"
operator: In
values:
- linux
- key: "kubernetes.io/arch"
operator: In
values:
- amd64
- arm64
- key: "eks.amazonaws.com/compute-type"
operator: NotIn
values:
- fargate
eniConfig:
# Specifies whether ENIConfigs should be created
create: false
region: us-west-2
subnets:
# Key identifies the AZ
# Value contains the subnet ID and security group IDs within that AZ
# a:
# id: subnet-123
# securityGroups:
# - sg-123
# b:
# id: subnet-456
# securityGroups:
# - sg-456
# c:
# id: subnet-789
# securityGroups:
# - sg-789

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,6 +0,0 @@
apiVersion: v2
name: iam-eks-user-mapper
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: 0.1.0

View File

@@ -1,63 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "iam-eks-user-mapper.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "iam-eks-user-mapper.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "iam-eks-user-mapper.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "iam-eks-user-mapper.labels" -}}
helm.sh/chart: {{ include "iam-eks-user-mapper.chart" . }}
{{ include "iam-eks-user-mapper.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "iam-eks-user-mapper.selectorLabels" -}}
app.kubernetes.io/name: {{ include "iam-eks-user-mapper.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "iam-eks-user-mapper.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "iam-eks-user-mapper.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,65 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "iam-eks-user-mapper.fullname" . }}
namespace: kube-system
labels:
{{- include "iam-eks-user-mapper.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "iam-eks-user-mapper.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "iam-eks-user-mapper.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "iam-eks-user-mapper.serviceAccountName" . }}
automountServiceAccountToken: true
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: AWS_REGION
value: {{ .Values.aws.region }}
- name: AWS_ACCESS_KEY_ID
value: {{ .Values.aws.accessKey }}
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: awsKey
name: {{ include "iam-eks-user-mapper.fullname" . }}
command:
- ./app
- --aws-iam-group
- {{ .Values.syncIamGroup }}
- --k8s-cap
- system:masters
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,24 +0,0 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: eks-configmap-modifier-role
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "update"]
resourceNames: ["aws-auth"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: eks-configmap-modifier-rolebinding
subjects:
- kind: ServiceAccount
name: {{ include "iam-eks-user-mapper.serviceAccountName" . }}
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: eks-configmap-modifier-role

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "iam-eks-user-mapper.fullname" . }}
namespace: kube-system
labels:
{{- include "iam-eks-user-mapper.labels" . | nindent 4 }}
type: Opaque
data:
awsKey: {{ .Values.aws.secretKey | b64enc }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "iam-eks-user-mapper.serviceAccountName" . }}
namespace: kube-system
labels:
{{- include "iam-eks-user-mapper.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,65 +0,0 @@
# Default values for iam-eks-user-mapper.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: ygrene/iam-eks-user-mapper
pullPolicy: IfNotPresent
tag: "latest"
aws:
accessKey: ""
secretKey: ""
region: ""
syncIamGroup: ""
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: "iam-eks-user-mapper"
labels:
app: iam-eks-user-mapper
selectorLabels:
app: iam-eks-user-mapper
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -66,16 +66,4 @@ resource "aws_docdb_subnet_group" "documentdb" {
subnet_ids = flatten([aws_subnet.documentdb_zone_a.*.id, aws_subnet.documentdb_zone_b.*.id, aws_subnet.documentdb_zone_c.*.id])
tags = local.tags_documentdb
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "documentdb_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow DocumentDB incoming access from anywhere"
from_port = 27017
protocol = "tcp"
security_group_id = aws_security_group.ec2_instance.id
to_port = 27017
type = "ingress"
}
}

View File

@@ -21,4 +21,20 @@ resource "aws_security_group_rule" "https" {
security_group_id = aws_security_group.ec2_instance.id
to_port = 443
type = "ingress"
}
# Randomize the inbound Kubernetes API port instead of exposing a well-known
# default. The same port is passed to k3s via --https-listen-port in the EC2
# bootstrap script and substituted into the kubeconfig uploaded to S3, so the
# three places stay consistent.
# NOTE(review): a random port is obscurity, not access control — the rule
# below still allows 0.0.0.0/0. Consider restricting the source CIDR, or
# fronting the API with a bastion/VPN.
resource "random_integer" "kubernetes_external_port" {
  min = 1024 # stay out of the privileged port range
  max = 65534
}

# Allow inbound TCP from anywhere to the randomized Kubernetes API port on the
# EC2 instance's security group.
resource "aws_security_group_rule" "kubernetes" {
  cidr_blocks       = ["0.0.0.0/0"]
  description       = "Kubernetes connectivity"
  from_port         = random_integer.kubernetes_external_port.result
  protocol          = "tcp"
  security_group_id = aws_security_group.ec2_instance.id
  to_port           = random_integer.kubernetes_external_port.result
  type              = "ingress"
}

View File

@@ -36,6 +36,7 @@ resource "aws_instance" "ec2_instance" {
security_groups = [aws_security_group.ec2_instance.id]
user_data = local.bootstrap
user_data_replace_on_change = false
tags = merge(
local.tags_common,
@@ -43,6 +44,10 @@ resource "aws_instance" "ec2_instance" {
"Service" = "EC2"
}
)
depends_on = [
aws_s3_bucket.kubeconfigs_bucket
]
}
resource "time_static" "on_ec2_create" {}
@@ -50,12 +55,16 @@ resource "time_static" "on_ec2_create" {}
locals {
bootstrap = <<BOOTSTRAP
#!/bin/bash
export KUBECONFIG_FILENAME="${var.kubernetes_cluster_id}.yaml"
export KUBECONFIG_PATH="/tmp/$KUBECONFIG_FILENAME"
apt-get update
apt-get -y install curl s3cmd
export INSTALL_K3S_VERSION=${var.k3s_config.version}
export INSTALL_K3S_CHANNEL=${var.k3s_config.channel}
export INSTALL_K3S_EXEC="${var.k3s_config.exec}"
export INSTALL_K3S_EXEC="--https-listen-port=${random_integer.kubernetes_external_port.result} ${var.k3s_config.exec}"
curl -sfL https://get.k3s.io | sh -
echo 'export KUBECONFIG=/etc/rancher/k3s/k3s.yaml' >> /etc/profile
@@ -63,6 +72,10 @@ while [ ! -f /etc/rancher/k3s/k3s.yaml ] ; do
echo "kubeconfig is not yet present, sleeping"
sleep 1
done
s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put /etc/rancher/k3s/k3s.yaml s3://${var.s3_bucket_kubeconfig}/${var.kubernetes_cluster_id}.yaml
# Calico will be installed and metadata won't be accessible anymore, it can only be done during bootstrap
sed -r "s/127.0.0.1:6443/$(curl -s http://169.254.169.254/latest/meta-data/public-hostname):${random_integer.kubernetes_external_port.result}/g" /etc/rancher/k3s/k3s.yaml > $KUBECONFIG_PATH
s3cmd --access_key={{ aws_access_key }} --secret_key={{ aws_secret_key }} --region={{ aws_region }} put $KUBECONFIG_PATH s3://${var.s3_bucket_kubeconfig}/$KUBECONFIG_FILENAME
rm -f $KUBECONFIG_PATH
BOOTSTRAP
}

View File

@@ -65,16 +65,4 @@ resource "aws_elasticache_subnet_group" "elasticache" {
# WARNING: this "name" value is used by the ElastiCache clusters; if you change it, you need to update them accordingly
name = "elasticache-${aws_vpc.ec2.id}"
subnet_ids = flatten([aws_subnet.elasticache_zone_a.*.id, aws_subnet.elasticache_zone_b.*.id, aws_subnet.elasticache_zone_c.*.id])
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "elasticache_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow Redis incoming access from anywhere"
from_port = 6379
protocol = "tcp"
security_group_id = aws_security_group.ec2_instance.id
to_port = 6379
type = "ingress"
}
}

View File

@@ -93,26 +93,4 @@ resource "aws_iam_role" "rds_enhanced_monitoring" {
resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" {
role = aws_iam_role.rds_enhanced_monitoring.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
}
# Todo: create a bastion to avoid this
resource "aws_security_group_rule" "postgres_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow RDS PostgreSQL incoming access from anywhere"
from_port = 5432
protocol = "tcp"
security_group_id = aws_security_group.ec2_instance.id
to_port = 5432
type = "ingress"
}
resource "aws_security_group_rule" "mysql_remote_access" {
cidr_blocks = ["0.0.0.0/0"]
description = "Allow RDS MySQL incoming access from anywhere"
from_port = 3306
protocol = "tcp"
security_group_id = aws_security_group.ec2_instance.id
to_port = 3306
type = "ingress"
}
}

View File

@@ -1,11 +1,7 @@
// S3 bucket to store kubeconfigs
resource "aws_s3_bucket" "kubeconfigs_bucket" {
bucket = var.s3_bucket_kubeconfig
acl = "private"
force_destroy = true
versioning {
enabled = true
}
tags = merge(
local.tags_ec2,
@@ -13,17 +9,30 @@ resource "aws_s3_bucket" "kubeconfigs_bucket" {
"Name" = "Kubernetes kubeconfig"
}
)
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.s3_kubeconfig_kms_encryption.arn
sse_algorithm = "aws:kms"
}
# Keep every object in the kubeconfig bucket private: the stored kubeconfigs
# grant full cluster access and must never be publicly readable.
resource "aws_s3_bucket_acl" "kubeconfigs_bucket_acl" {
  bucket = aws_s3_bucket.kubeconfigs_bucket.id
  acl    = "private"
}

# Encrypt every object at rest with the dedicated KMS key declared in this
# file (aws_kms_key.s3_kubeconfig_kms_encryption).
resource "aws_s3_bucket_server_side_encryption_configuration" "kubeconfigs_bucket_encryption" {
  bucket = aws_s3_bucket.kubeconfigs_bucket.id
  rule {
    apply_server_side_encryption_by_default {
      kms_master_key_id = aws_kms_key.s3_kubeconfig_kms_encryption.arn
      sse_algorithm     = "aws:kms"
    }
  }
}

# Keep previous revisions of each kubeconfig (the EC2 bootstrap re-uploads the
# file, so overwrites are expected).
# NOTE(review): "versionning" is a typo in the resource name; renaming it now
# would force Terraform to destroy/recreate the resource in state, so it is
# deliberately left as-is.
resource "aws_s3_bucket_versioning" "kubeconfigs_bucket_versionning" {
  bucket = aws_s3_bucket.kubeconfigs_bucket.id
  versioning_configuration {
    status = "Enabled"
  }
}
resource "aws_kms_key" "s3_kubeconfig_kms_encryption" {
description = "s3 kubeconfig encryption"
tags = merge(

View File

@@ -0,0 +1,433 @@
use crate::cloud_provider::aws::kubernetes::{Options, VpcQoveryNetworkMode};
use crate::cloud_provider::helm::{
get_chart_for_cluster_agent, get_chart_for_shell_agent, ChartInfo, ChartSetValue, ClusterAgentContext, CommonChart,
CoreDNSConfigChart, HelmChart, HelmChartNamespaces, ShellAgentContext,
};
use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAgent, QoveryAppName};
use crate::errors::CommandError;
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
/// Subset of the Terraform outputs for an AWS EC2 (k3s) cluster that the Helm
/// deployment needs. Deserialized from the JSON config file rendered by
/// Terraform (read in `ec2_aws_helm_charts` via `serde_json::from_reader`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AwsEc2QoveryTerraformConfig {
    // Loki storage backend settings: presumably an S3 storage-config string
    // plus the IAM key pair granting access to that bucket — TODO confirm the
    // exact format against the Terraform outputs.
    pub loki_storage_config_aws_s3: String,
    pub aws_iam_loki_storage_key: String,
    pub aws_iam_loki_storage_secret: String,
}
/// All cluster-specific inputs `ec2_aws_helm_charts` needs to render the Helm
/// chart list for an AWS EC2 (k3s) cluster: identity, cloud credentials, DNS
/// configuration and feature flags, assembled by the caller.
pub struct Ec2ChartsConfigPrerequisites {
    // Cluster / organization identity.
    pub organization_id: String,
    pub organization_long_id: uuid::Uuid,
    pub cluster_id: String,
    pub cluster_long_id: uuid::Uuid,
    pub region: String,
    pub cluster_name: String,
    pub cloud_provider: String,
    pub test_cluster: bool,
    // AWS credentials forwarded to charts that call AWS APIs.
    pub aws_access_key_id: String,
    pub aws_secret_access_key: String,
    pub vpc_qovery_network_mode: VpcQoveryNetworkMode,
    pub qovery_engine_location: EngineLocation,
    // Feature flags ("ff_"); presumably toggle the log/metrics history
    // charts — consumers are not visible in this chunk, confirm against the
    // full function body.
    pub ff_log_history_enabled: bool,
    pub ff_metrics_history_enabled: bool,
    // Managed DNS settings, consumed by the coredns-config and
    // cert-manager-configs charts.
    pub managed_dns_name: String,
    pub managed_dns_helm_format: String,
    pub managed_dns_resolvers_terraform_format: String,
    // External DNS / ACME settings; the cloudflare_* fields are only read
    // when `external_dns_provider == "cloudflare"`.
    pub external_dns_provider: String,
    pub dns_email_report: String,
    pub acme_url: String,
    pub cloudflare_email: String,
    pub cloudflare_api_token: String,
    pub disable_pleco: bool,
    // Qovery options parsed from the JSON input.
    pub infra_options: Options,
}
pub fn ec2_aws_helm_charts(
qovery_terraform_config_file: &str,
chart_config_prerequisites: &Ec2ChartsConfigPrerequisites,
chart_prefix_path: Option<&str>,
kubernetes_config: &Path,
envs: &[(String, String)],
) -> Result<Vec<Vec<Box<dyn HelmChart>>>, CommandError> {
let content_file = match File::open(&qovery_terraform_config_file) {
Ok(x) => x,
Err(e) => {
return Err(CommandError::new(
"Can't deploy helm chart as Qovery terraform config file has not been rendered by Terraform. Are you running it in dry run mode?".to_string(),
Some(e.to_string()),
Some(envs.to_vec()),
));
}
};
let chart_prefix = chart_prefix_path.unwrap_or("./");
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
let reader = BufReader::new(content_file);
let qovery_terraform_config: AwsEc2QoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
return Err(CommandError::new(
format!("Error while parsing terraform config file {}", qovery_terraform_config_file),
Some(e.to_string()),
Some(envs.to_vec()),
));
}
};
// Qovery storage class
let q_storage_class = CommonChart {
chart_info: ChartInfo {
name: "q-storageclass".to_string(),
path: chart_path("/charts/q-storageclass"),
..Default::default()
},
};
// Calico for AWS
let aws_calico = CommonChart {
chart_info: ChartInfo {
name: "calico".to_string(),
path: chart_path("charts/aws-calico"),
..Default::default()
},
};
let coredns_config = CoreDNSConfigChart {
chart_info: ChartInfo {
name: "coredns".to_string(),
path: chart_path("/charts/coredns-config"),
values: vec![
ChartSetValue {
key: "managed_dns".to_string(),
value: chart_config_prerequisites.managed_dns_helm_format.clone(),
},
ChartSetValue {
key: "managed_dns_resolvers".to_string(),
value: chart_config_prerequisites
.managed_dns_resolvers_terraform_format
.clone(),
},
],
..Default::default()
},
};
let external_dns = CommonChart {
chart_info: ChartInfo {
name: "externaldns".to_string(),
path: chart_path("common/charts/external-dns"),
values_files: vec![chart_path("chart_values/external-dns.yaml")],
values: vec![
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "50Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "50Mi".to_string(),
},
],
..Default::default()
},
};
let cert_manager = CommonChart {
chart_info: ChartInfo {
name: "cert-manager".to_string(),
path: chart_path("common/charts/cert-manager"),
namespace: HelmChartNamespaces::CertManager,
values: vec![
ChartSetValue {
key: "installCRDs".to_string(),
value: "true".to_string(),
},
ChartSetValue {
key: "replicaCount".to_string(),
value: "1".to_string(),
},
// https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check
ChartSetValue {
key: "extraArgs".to_string(),
value: "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}"
.to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.enabled".to_string(),
// Due to cycle, prometheus need tls certificate from cert manager, and enabling this will require
// prometheus to be already installed
value: "false".to_string(),
},
ChartSetValue {
key: "prometheus.servicemonitor.prometheusInstance".to_string(),
value: "qovery".to_string(),
},
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "1Gi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "1Gi".to_string(),
},
// Webhooks resources limits
ChartSetValue {
key: "webhook.resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.cpu".to_string(),
value: "50m".to_string(),
},
ChartSetValue {
key: "webhook.resources.limits.memory".to_string(),
value: "128Mi".to_string(),
},
ChartSetValue {
key: "webhook.resources.requests.memory".to_string(),
value: "128Mi".to_string(),
},
// Cainjector resources limits
ChartSetValue {
key: "cainjector.resources.limits.cpu".to_string(),
value: "500m".to_string(),
},
ChartSetValue {
key: "cainjector.resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "cainjector.resources.limits.memory".to_string(),
value: "1Gi".to_string(),
},
ChartSetValue {
key: "cainjector.resources.requests.memory".to_string(),
value: "1Gi".to_string(),
},
],
..Default::default()
},
};
let mut cert_manager_config = CommonChart {
chart_info: ChartInfo {
name: "cert-manager-configs".to_string(),
path: chart_path("common/charts/cert-manager-configs"),
namespace: HelmChartNamespaces::CertManager,
values: vec![
ChartSetValue {
key: "externalDnsProvider".to_string(),
value: chart_config_prerequisites.external_dns_provider.clone(),
},
ChartSetValue {
key: "acme.letsEncrypt.emailReport".to_string(),
value: chart_config_prerequisites.dns_email_report.clone(),
},
ChartSetValue {
key: "acme.letsEncrypt.acmeUrl".to_string(),
value: chart_config_prerequisites.acme_url.clone(),
},
ChartSetValue {
key: "managedDns".to_string(),
value: chart_config_prerequisites.managed_dns_helm_format.clone(),
},
],
..Default::default()
},
};
if chart_config_prerequisites.external_dns_provider == "cloudflare" {
cert_manager_config.chart_info.values.push(ChartSetValue {
key: "provider.cloudflare.apiToken".to_string(),
value: chart_config_prerequisites.cloudflare_api_token.clone(),
});
cert_manager_config.chart_info.values.push(ChartSetValue {
key: "provider.cloudflare.email".to_string(),
value: chart_config_prerequisites.cloudflare_email.clone(),
})
}
let nginx_ingress = CommonChart {
chart_info: ChartInfo {
name: "nginx-ingress".to_string(),
path: chart_path("common/charts/ingress-nginx"),
namespace: HelmChartNamespaces::NginxIngress,
// Because of NLB, svc can take some time to start
timeout_in_seconds: 300,
values_files: vec![chart_path("chart_values/nginx-ingress.yaml")],
values: vec![
// Controller resources limits
ChartSetValue {
key: "controller.resources.limits.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "controller.resources.requests.cpu".to_string(),
value: "100m".to_string(),
},
ChartSetValue {
key: "controller.resources.limits.memory".to_string(),
value: "768Mi".to_string(),
},
ChartSetValue {
key: "controller.resources.requests.memory".to_string(),
value: "768Mi".to_string(),
},
// Default backend resources limits
ChartSetValue {
key: "defaultBackend.resources.limits.cpu".to_string(),
value: "20m".to_string(),
},
ChartSetValue {
key: "defaultBackend.resources.requests.cpu".to_string(),
value: "10m".to_string(),
},
ChartSetValue {
key: "defaultBackend.resources.limits.memory".to_string(),
value: "32Mi".to_string(),
},
ChartSetValue {
key: "defaultBackend.resources.requests.memory".to_string(),
value: "32Mi".to_string(),
},
],
..Default::default()
},
};
let cluster_agent_context = ClusterAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token,
organization_long_id: &chart_config_prerequisites.organization_long_id,
cluster_id: &chart_config_prerequisites.cluster_id,
cluster_long_id: &chart_config_prerequisites.cluster_long_id,
cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let cluster_agent = get_chart_for_cluster_agent(cluster_agent_context, chart_path)?;
let shell_context = ShellAgentContext {
api_url: &chart_config_prerequisites.infra_options.qovery_api_url,
api_token: &chart_config_prerequisites.infra_options.agent_version_controller_token,
organization_long_id: &chart_config_prerequisites.organization_long_id,
cluster_id: &chart_config_prerequisites.cluster_id,
cluster_long_id: &chart_config_prerequisites.cluster_long_id,
cluster_token: &chart_config_prerequisites.infra_options.qovery_cluster_secret_token,
grpc_url: &chart_config_prerequisites.infra_options.qovery_grpc_url,
};
let shell_agent = get_chart_for_shell_agent(shell_context, chart_path)?;
let qovery_agent_version: QoveryAgent = get_qovery_app_version(
QoveryAppName::Agent,
&chart_config_prerequisites.infra_options.agent_version_controller_token,
&chart_config_prerequisites.infra_options.qovery_api_url,
&chart_config_prerequisites.cluster_id,
)?;
let mut qovery_agent = CommonChart {
chart_info: ChartInfo {
name: "qovery-agent".to_string(),
path: chart_path("common/charts/qovery/qovery-agent"),
namespace: HelmChartNamespaces::Qovery,
values: vec![
ChartSetValue {
key: "image.tag".to_string(),
value: qovery_agent_version.version,
},
ChartSetValue {
key: "replicaCount".to_string(),
value: "1".to_string(),
},
ChartSetValue {
key: "environmentVariables.GRPC_SERVER".to_string(),
value: chart_config_prerequisites.infra_options.qovery_grpc_url.to_string(),
},
ChartSetValue {
key: "environmentVariables.CLUSTER_TOKEN".to_string(),
value: chart_config_prerequisites
.infra_options
.qovery_cluster_secret_token
.to_string(),
},
ChartSetValue {
key: "environmentVariables.CLUSTER_ID".to_string(),
value: chart_config_prerequisites.cluster_long_id.to_string(),
},
ChartSetValue {
key: "environmentVariables.ORGANIZATION_ID".to_string(),
value: chart_config_prerequisites.organization_long_id.to_string(),
},
ChartSetValue {
key: "environmentVariables.LOKI_URL".to_string(),
value: format!("http://{}.cluster.local:3100", "not-installed"),
},
// resources limits
ChartSetValue {
key: "resources.limits.cpu".to_string(),
value: "1".to_string(),
},
ChartSetValue {
key: "resources.requests.cpu".to_string(),
value: "200m".to_string(),
},
ChartSetValue {
key: "resources.limits.memory".to_string(),
value: "500Mi".to_string(),
},
ChartSetValue {
key: "resources.requests.memory".to_string(),
value: "500Mi".to_string(),
},
],
..Default::default()
},
};
if chart_config_prerequisites.ff_log_history_enabled {
qovery_agent.chart_info.values.push(ChartSetValue {
key: "environmentVariables.FEATURES".to_string(),
value: "LogsHistory".to_string(),
})
}
// chart deployment order matters!!!
let level_1: Vec<Box<dyn HelmChart>> = vec![Box::new(q_storage_class), Box::new(coredns_config)];
let level_2: Vec<Box<dyn HelmChart>> = vec![Box::new(cert_manager)];
let level_3: Vec<Box<dyn HelmChart>> = vec![];
let level_4: Vec<Box<dyn HelmChart>> = vec![Box::new(aws_calico)];
let level_5: Vec<Box<dyn HelmChart>> = vec![Box::new(external_dns)];
let level_6: Vec<Box<dyn HelmChart>> = vec![Box::new(nginx_ingress)];
let level_7: Vec<Box<dyn HelmChart>> = vec![
Box::new(cert_manager_config),
Box::new(qovery_agent), // TODO: Migrate to the new cluster agent
Box::new(cluster_agent),
Box::new(shell_agent),
];
info!("charts configuration preparation finished");
Ok(vec![level_1, level_2, level_3, level_4, level_5, level_6, level_7])
}

View File

@@ -16,7 +16,7 @@ use std::thread::sleep;
use std::time::Duration;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AwsQoveryTerraformConfig {
pub struct AwsEksQoveryTerraformConfig {
pub aws_iam_eks_user_mapper_key: String,
pub aws_iam_eks_user_mapper_secret: String,
pub aws_iam_cluster_autoscaler_key: String,
@@ -28,7 +28,7 @@ pub struct AwsQoveryTerraformConfig {
pub aws_iam_loki_storage_secret: String,
}
pub struct ChartsConfigPrerequisites {
pub struct EksChartsConfigPrerequisites {
pub organization_id: String,
pub organization_long_id: uuid::Uuid,
pub cluster_id: String,
@@ -56,9 +56,9 @@ pub struct ChartsConfigPrerequisites {
pub infra_options: Options,
}
pub fn aws_helm_charts(
pub fn eks_aws_helm_charts(
qovery_terraform_config_file: &str,
chart_config_prerequisites: &ChartsConfigPrerequisites,
chart_config_prerequisites: &EksChartsConfigPrerequisites,
chart_prefix_path: Option<&str>,
kubernetes_config: &Path,
envs: &[(String, String)],
@@ -76,7 +76,7 @@ pub fn aws_helm_charts(
let chart_prefix = chart_prefix_path.unwrap_or("./");
let chart_path = |x: &str| -> String { format!("{}/{}", &chart_prefix, x) };
let reader = BufReader::new(content_file);
let qovery_terraform_config: AwsQoveryTerraformConfig = match serde_json::from_reader(reader) {
let qovery_terraform_config: AwsEksQoveryTerraformConfig = match serde_json::from_reader(reader) {
Ok(config) => config,
Err(e) => {
return Err(CommandError::new(

View File

@@ -8,12 +8,13 @@ use retry::OperationResult;
use serde::{Deserialize, Serialize};
use tera::Context as TeraContext;
use crate::cloud_provider::aws::kubernetes::helm_charts::{aws_helm_charts, ChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::ec2_helm_charts::{ec2_aws_helm_charts, Ec2ChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::eks_helm_charts::{eks_aws_helm_charts, EksChartsConfigPrerequisites};
use crate::cloud_provider::aws::kubernetes::roles::get_default_roles_to_create;
use crate::cloud_provider::aws::regions::{AwsRegion, AwsZones};
use crate::cloud_provider::helm::{deploy_charts_levels, ChartInfo};
use crate::cloud_provider::kubernetes::{
is_kubernetes_upgrade_required, uninstall_cert_manager, Kubernetes, ProviderOptions,
is_kubernetes_upgrade_required, uninstall_cert_manager, Kind, Kubernetes, ProviderOptions,
};
use crate::cloud_provider::models::{NodeGroups, NodeGroupsFormat};
use crate::cloud_provider::qovery::EngineLocation;
@@ -32,8 +33,9 @@ use crate::object_storage::s3::S3;
use crate::string::terraform_list_format;
pub mod ec2;
mod ec2_helm_charts;
pub mod eks;
pub mod helm_charts;
pub mod eks_helm_charts;
pub mod node;
pub mod roles;
@@ -606,36 +608,6 @@ fn create(
&listeners_helper,
);
// temporary: remove helm/kube management from terraform
match terraform_init_validate_state_list(temp_dir.as_str()) {
Ok(x) => {
let items_type = vec!["helm_release", "kubernetes_namespace"];
for item in items_type {
for entry in x.clone() {
if entry.starts_with(item) {
match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) {
Ok(_) => kubernetes.logger().log(EngineEvent::Info(
event_details.clone(),
EventMessage::new_from_safe(format!("successfully removed {}", &entry)),
)),
Err(e) => {
return Err(EngineError::new_terraform_cannot_remove_entry_out(
event_details,
entry.to_string(),
e,
));
}
}
};
}
}
}
Err(e) => kubernetes.logger().log(EngineEvent::Error(
EngineError::new_terraform_state_does_not_exist(event_details.clone(), e),
None,
)),
};
// terraform deployment dedicated to cloud resources
if let Err(e) = terraform_init_validate_plan_apply(temp_dir.as_str(), kubernetes.context().is_dry_run_deploy()) {
return Err(EngineError::new_terraform_error_while_executing_pipeline(event_details, e));
@@ -653,46 +625,97 @@ fn create(
.map(|x| (x.0.to_string(), x.1.to_string()))
.collect();
let charts_prerequisites = ChartsConfigPrerequisites {
organization_id: kubernetes.cloud_provider().organization_id().to_string(),
organization_long_id: kubernetes.cloud_provider().organization_long_id(),
infra_options: options.clone(),
cluster_id: kubernetes.id().to_string(),
cluster_long_id: kubernetes_long_id,
region: kubernetes.region(),
cluster_name: kubernetes.cluster_name(),
cloud_provider: "aws".to_string(),
test_cluster: kubernetes.context().is_test_cluster(),
aws_access_key_id: kubernetes.cloud_provider().access_key_id(),
aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(),
vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(),
qovery_engine_location: options.qovery_engine_location.clone(),
ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory),
ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory),
managed_dns_name: kubernetes.dns_provider().domain().to_string(),
managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(),
managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(kubernetes.dns_provider()),
external_dns_provider: kubernetes.dns_provider().provider_name().to_string(),
dns_email_report: options.tls_email_report.clone(),
acme_url: lets_encrypt_url(kubernetes.context()),
cloudflare_email: kubernetes.dns_provider().account().to_string(),
cloudflare_api_token: kubernetes.dns_provider().token().to_string(),
disable_pleco: kubernetes.context().disable_pleco(),
};
kubernetes.logger().log(EngineEvent::Info(
event_details.clone(),
EventMessage::new_from_safe("Preparing chart configuration to be deployed".to_string()),
));
let helm_charts_to_deploy = aws_helm_charts(
format!("{}/qovery-tf-config.json", &temp_dir).as_str(),
&charts_prerequisites,
Some(&temp_dir),
kubeconfig_path,
&credentials_environment_variables,
)
.map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?;
let helm_charts_to_deploy = match kubernetes.kind() {
Kind::Eks => {
let charts_prerequisites = EksChartsConfigPrerequisites {
organization_id: kubernetes.cloud_provider().organization_id().to_string(),
organization_long_id: kubernetes.cloud_provider().organization_long_id(),
infra_options: options.clone(),
cluster_id: kubernetes.id().to_string(),
cluster_long_id: kubernetes_long_id,
region: kubernetes.region(),
cluster_name: kubernetes.cluster_name(),
cloud_provider: "aws".to_string(),
test_cluster: kubernetes.context().is_test_cluster(),
aws_access_key_id: kubernetes.cloud_provider().access_key_id(),
aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(),
vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(),
qovery_engine_location: options.qovery_engine_location.clone(),
ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory),
ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory),
managed_dns_name: kubernetes.dns_provider().domain().to_string(),
managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(),
managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(
kubernetes.dns_provider(),
),
external_dns_provider: kubernetes.dns_provider().provider_name().to_string(),
dns_email_report: options.tls_email_report.clone(),
acme_url: lets_encrypt_url(kubernetes.context()),
cloudflare_email: kubernetes.dns_provider().account().to_string(),
cloudflare_api_token: kubernetes.dns_provider().token().to_string(),
disable_pleco: kubernetes.context().disable_pleco(),
};
eks_aws_helm_charts(
format!("{}/qovery-tf-config.json", &temp_dir).as_str(),
&charts_prerequisites,
Some(&temp_dir),
kubeconfig_path,
&credentials_environment_variables,
)
.map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?
}
Kind::Ec2 => {
let charts_prerequisites = Ec2ChartsConfigPrerequisites {
organization_id: kubernetes.cloud_provider().organization_id().to_string(),
organization_long_id: kubernetes.cloud_provider().organization_long_id(),
infra_options: options.clone(),
cluster_id: kubernetes.id().to_string(),
cluster_long_id: kubernetes_long_id,
region: kubernetes.region(),
cluster_name: kubernetes.cluster_name(),
cloud_provider: "aws".to_string(),
test_cluster: kubernetes.context().is_test_cluster(),
aws_access_key_id: kubernetes.cloud_provider().access_key_id(),
aws_secret_access_key: kubernetes.cloud_provider().secret_access_key(),
vpc_qovery_network_mode: options.vpc_qovery_network_mode.clone(),
qovery_engine_location: options.qovery_engine_location.clone(),
ff_log_history_enabled: kubernetes.context().is_feature_enabled(&Features::LogsHistory),
ff_metrics_history_enabled: kubernetes.context().is_feature_enabled(&Features::MetricsHistory),
managed_dns_name: kubernetes.dns_provider().domain().to_string(),
managed_dns_helm_format: kubernetes.dns_provider().domain().to_helm_format_string(),
managed_dns_resolvers_terraform_format: managed_dns_resolvers_terraform_format(
kubernetes.dns_provider(),
),
external_dns_provider: kubernetes.dns_provider().provider_name().to_string(),
dns_email_report: options.tls_email_report.clone(),
acme_url: lets_encrypt_url(kubernetes.context()),
cloudflare_email: kubernetes.dns_provider().account().to_string(),
cloudflare_api_token: kubernetes.dns_provider().token().to_string(),
disable_pleco: kubernetes.context().disable_pleco(),
};
ec2_aws_helm_charts(
format!("{}/qovery-tf-config.json", &temp_dir).as_str(),
&charts_prerequisites,
Some(&temp_dir),
kubeconfig_path,
&credentials_environment_variables,
)
.map_err(|e| EngineError::new_helm_charts_setup_error(event_details.clone(), e))?
}
_ => {
let safe_message = format!("unsupported requested cluster type: {}", kubernetes.kind());
return Err(EngineError::new_unsupported_cluster_kind(
event_details,
&safe_message,
CommandError::new(safe_message.to_string(), None, None),
));
}
};
deploy_charts_levels(
kubeconfig_path,

View File

@@ -31,7 +31,7 @@ use crate::cmd::helm::{to_engine_error, Helm};
use crate::cmd::kubectl::{
do_kubectl_exec_get_loadbalancer_id, kubectl_exec_get_all_namespaces, kubectl_exec_get_events,
};
use crate::cmd::terraform::{terraform_exec, terraform_init_validate_plan_apply, terraform_init_validate_state_list};
use crate::cmd::terraform::terraform_init_validate_plan_apply;
use crate::deletion_utilities::{get_firsts_namespaces_to_delete, get_qovery_managed_namespaces};
use crate::dns_provider::DnsProvider;
use crate::errors::{CommandError, EngineError, ErrorMessageVerbosity};
@@ -540,36 +540,6 @@ impl DOKS {
&listeners_helper,
);
// temporary: remove helm/kube management from terraform
match terraform_init_validate_state_list(temp_dir.as_str()) {
Ok(x) => {
let items_type = vec!["helm_release", "kubernetes_namespace"];
for item in items_type {
for entry in x.clone() {
if entry.starts_with(item) {
match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) {
Ok(_) => self.logger().log(EngineEvent::Info(
event_details.clone(),
EventMessage::new_from_safe(format!("successfully removed {}", &entry)),
)),
Err(e) => {
return Err(EngineError::new_terraform_cannot_remove_entry_out(
event_details,
entry.to_string(),
e,
))
}
}
};
}
}
}
Err(e) => self.logger().log(EngineEvent::Error(
EngineError::new_terraform_state_does_not_exist(event_details.clone(), e),
None,
)),
};
// Logs bucket
if let Err(e) = self.spaces.create_bucket(self.logs_bucket_name().as_str()) {
let error =

View File

@@ -664,36 +664,6 @@ impl Kapsule {
&listeners_helper,
);
// temporary: remove helm/kube management from terraform
match terraform_init_validate_state_list(temp_dir.as_str()) {
Ok(x) => {
let items_type = vec!["helm_release", "kubernetes_namespace"];
for item in items_type {
for entry in x.clone() {
if entry.starts_with(item) {
match terraform_exec(temp_dir.as_str(), vec!["state", "rm", &entry]) {
Ok(_) => self.logger().log(EngineEvent::Info(
event_details.clone(),
EventMessage::new_from_safe(format!("successfully removed {}", &entry)),
)),
Err(e) => {
return Err(EngineError::new_terraform_cannot_remove_entry_out(
event_details,
entry.to_string(),
e,
))
}
}
};
}
}
}
Err(e) => self.logger().log(EngineEvent::Error(
EngineError::new_terraform_state_does_not_exist(event_details.clone(), e),
None,
)),
};
// TODO(benjaminch): move this elsewhere
// Create object-storage buckets
self.logger().log(EngineEvent::Info(

View File

@@ -73,6 +73,7 @@ pub enum Tag {
HelmHistoryError,
CannotGetAnyAvailableVPC,
UnsupportedVersion,
UnsupportedClusterKind,
CannotGetSupportedVersions,
CannotGetCluster,
ContainerRegistryError,
@@ -224,6 +225,7 @@ impl From<errors::Tag> for Tag {
}
errors::Tag::BuilderError => Tag::BuilderError,
errors::Tag::ContainerRegistryError => Tag::ContainerRegistryError,
errors::Tag::UnsupportedClusterKind => Tag::UnsupportedClusterKind,
}
}
}

View File

@@ -175,6 +175,8 @@ pub enum Tag {
CannotGetWorkspaceDirectory,
/// UnsupportedInstanceType: represents an unsupported instance type for the given cloud provider.
UnsupportedInstanceType,
/// UnsupportedClusterKind: represents an unsupported cluster kind by Qovery.
UnsupportedClusterKind,
/// UnsupportedRegion: represents an unsupported region for the given cloud provider.
UnsupportedRegion,
/// UnsupportedZone: represents an unsupported zone in region for the given cloud provider.
@@ -623,6 +625,32 @@ impl EngineError {
)
}
/// Creates new error for unsupported cluster kind.
///
/// Qovery doesn't support this kind of clusters.
///
/// Arguments:
///
/// * `event_details`: Error linked event details.
/// * `requested_kind`: Raw requested cluster kind string.
/// * `error_message`: Raw error message.
pub fn new_unsupported_cluster_kind(
    event_details: EventDetails,
    requested_kind: &str,
    error_message: CommandError,
) -> EngineError {
    let message = format!("`{}` cluster kind is not supported", requested_kind);
    EngineError::new(
        event_details,
        Tag::UnsupportedClusterKind,
        // Same text serves as both safe and full message; clone instead of
        // `to_string()` on an owned String to make the copy explicit.
        message.clone(),
        message,
        Some(error_message),
        None, // TODO(documentation): Create a page entry to details this error
        Some("Selected cluster kind is not supported, please check Qovery's documentation.".to_string()),
    )
}
/// Creates new error for unsupported region.
///
/// Cloud provider doesn't support the requested region.

View File

@@ -12,8 +12,6 @@ use std::str::FromStr;
use test_utilities::aws::{K3S_KUBERNETES_MAJOR_VERSION, K3S_KUBERNETES_MINOR_VERSION};
use test_utilities::common::{cluster_test, ClusterDomain, ClusterTestType};
pub const _AWS_K3S_VERSION: &str = "v1.20.15+k3s1";
#[cfg(feature = "test-aws-infra-ec2")]
fn create_and_destroy_aws_ec2_k3s_cluster(
region: String,