diff --git a/Cargo.lock b/Cargo.lock index e9711f16..33b07210 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2115,6 +2115,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_yaml", "strum", "strum_macros", "sysinfo", @@ -2965,6 +2966,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +dependencies = [ + "indexmap", + "ryu", + "serde", + "yaml-rust", +] + [[package]] name = "sha-1" version = "0.8.2" @@ -4211,6 +4224,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.4.1" diff --git a/Cargo.toml b/Cargo.toml index 5d48f51b..1d074574 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,8 @@ tera = "1.10.0" serde = "1.0.126" serde_json = "1.0.64" serde_derive = "1.0.126" +serde_yaml = "0.8.23" + # AWS deps tokio = { version = "1.10.0", features = ["full"] } rusoto_core = "0.47.0" diff --git a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml index 40ab5de7..f31cea34 100644 --- a/lib/aws/bootstrap/charts/aws-calico/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 -appVersion: 3.13.4 +appVersion: 3.19.1 description: A Helm chart for installing Calico on AWS icon: https://www.projectcalico.org/wp-content/uploads/2019/09/Calico_Logo_Large_Calico.png name: aws-calico -version: 0.3.1 +version: 0.3.10 diff --git a/lib/aws/bootstrap/charts/aws-calico/README.md 
b/lib/aws/bootstrap/charts/aws-calico/README.md index 9abbca69..b5fb89ca 100644 --- a/lib/aws/bootstrap/charts/aws-calico/README.md +++ b/lib/aws/bootstrap/charts/aws-calico/README.md @@ -1,7 +1,11 @@ # Calico on AWS +**Note**: The recommended way to install calico on EKS is via tigera-operator instead of this helm-chart. +You can follow https://docs.aws.amazon.com/eks/latest/userguide/calico.html for detailed instructions. + This chart installs Calico on AWS: https://docs.aws.amazon.com/eks/latest/userguide/calico.html + ## Prerequisites - Kubernetes 1.11+ running on AWS @@ -38,26 +42,32 @@ If you receive an error similar to `Error: release aws-calico failed: The following table lists the configurable parameters for this chart and their default values. -| Parameter | Description | Default | -|----------------------------------------|---------------------------------------------------------|---------------------------------| -| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | -| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | -| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | -| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | -| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | -| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | -| `calico.node.logseverity` | Calico Node Log Severity | `Info` | -| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | -| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | -| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ 
beta.kubernetes.io/os: linux }` | -| `calico.tag` | Calico version | `v3.8.1` | -| `fullnameOverride` | Override the fullname of the chart | `calico` | -| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` | -| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | -| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | +| Parameter | Description | Default | +|------------------------------------------|---------------------------------------------------------|---------------------------------| +| `calico.typha.image` | Calico Typha Image | `quay.io/calico/typha` | +| `calico.typha.resources` | Calico Typha Resources | `requests.memory: 64Mi, requests.cpu: 50m, limits.memory: 96Mi, limits.cpu: 100m` | +| `calico.typha.logseverity` | Calico Typha Log Severity | `Info` | +| `calico.typha.nodeSelector` | Calico Typha Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.typha.podAnnotations` | Calico Typha Node Pod Annotations | `{}` | +| `calico.typha.podLabels` | Calico Typha Node Pod Labels | `{}` | +| `calico.node.extraEnv` | Calico Node extra ENV vars | `[]` | +| `calico.node.image` | Calico Node Image | `quay.io/calico/node` | +| `calico.node.resources` | Calico Node Resources | `requests.memory: 32Mi, requests.cpu: 20m, limits.memory: 64Mi, limits.cpu: 100m` | +| `calico.node.logseverity` | Calico Node Log Severity | `Info` | +| `calico.node.nodeSelector` | Calico Node Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.node.podAnnotations` | Calico Node Pod Annotations | `{}` | +| `calico.node.podLabels` | Calico Node Pod Labels | `{}` | +| `calico.typha_autoscaler.resources` | Calico Typha Autoscaler Resources | `requests.memory: 
16Mi, requests.cpu: 10m, limits.memory: 32Mi, limits.cpu: 10m` | +| `calico.typha_autoscaler.nodeSelector` | Calico Typha Autoscaler Node Selector | `{ beta.kubernetes.io/os: linux }` | +| `calico.typha_autoscaler.podAnnotations` | Calico Typha Autoscaler Pod Annotations | `{}` | +| `calico.typha_autoscaler.podLabels` | Calico Typha Autoscaler Pod Labels | `{}` | +| `calico.tag` | Calico version | `v3.8.1` | +| `fullnameOverride` | Override the fullname of the chart | `calico` | +| `podSecurityPolicy.create` | Specifies whether podSecurityPolicy and related rbac objects should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `autoscaler.image` | Cluster Proportional Autoscaler Image | `k8s.gcr.io/cluster-proportional-autoscaler-amd64` | +| `autoscaler.tag` | Cluster Proportional Autoscaler version | `1.1.2` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml index ce553146..0cc7742c 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/daemon-set.yaml @@ -9,6 +9,9 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-node" + {{- if .Values.calico.node.podLabels }} +{{ toYaml .Values.calico.node.podLabels | indent 6 }} + {{- end }} updateStrategy: type: RollingUpdate rollingUpdate: @@ -17,8 +20,23 @@ spec: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . 
}}-node" + {{- if .Values.calico.node.podLabels }} +{{ toYaml .Values.calico.node.podLabels | indent 8 }} + {{- end }} + {{- with .Values.calico.node.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} spec: priorityClassName: system-node-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate nodeSelector: {{- toYaml .Values.calico.node.nodeSelector | nindent 8 }} hostNetwork: true @@ -70,6 +88,8 @@ spec: value: "none" - name: FELIX_PROMETHEUSMETRICSENABLED value: "true" + - name: FELIX_ROUTESOURCE + value: "WorkloadIPs" - name: NO_DEFAULT_POOLS value: "true" # Set based on the k8s node name. @@ -95,12 +115,14 @@ spec: periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 + timeoutSeconds: 5 readinessProbe: exec: command: - /bin/calico-node - -felix-ready periodSeconds: 10 + timeoutSeconds: 5 resources: {{- toYaml .Values.calico.node.resources | nindent 12 }} volumeMounts: diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml index a879a8d2..360d4a87 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/deployment.yaml @@ -10,12 +10,21 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + {{- if .Values.calico.typha.podLabels }} +{{ toYaml .Values.calico.typha.podLabels | indent 6 }} + {{- end }} template: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha" + {{- if .Values.calico.typha.podLabels }} +{{ toYaml .Values.calico.typha.podLabels | indent 8 }} + {{- end }} annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + {{- with .Values.calico.typha.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} spec: priorityClassName: system-cluster-critical nodeSelector: @@ -24,9 +33,9 @@ spec: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - {{- if .Values.calico.typha.tolerations }} -{{ toYaml .Values.calico.typha.tolerations | indent 10 }} - {{- end }} + {{- if .Values.calico.typha.tolerations }} + {{- toYaml .Values.calico.typha.tolerations | nindent 8 }} + {{- end }} hostNetwork: true serviceAccountName: "{{ include "aws-calico.serviceAccountName" . }}-node" # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 @@ -97,11 +106,20 @@ spec: selector: matchLabels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + {{- if .Values.calico.typha_autoscaler.podLabels }} +{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 6 }} + {{- end }} replicas: 1 template: metadata: labels: app.kubernetes.io/name: "{{ include "aws-calico.fullname" . }}-typha-autoscaler" + {{- if .Values.calico.typha_autoscaler.podLabels }} +{{ toYaml .Values.calico.typha_autoscaler.podLabels | indent 8 }} + {{- end }} + {{- with .Values.calico.typha_autoscaler.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} spec: priorityClassName: system-cluster-critical nodeSelector: @@ -110,9 +128,9 @@ spec: # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly operator: Exists - {{- if .Values.calico.typha_autoscaler.tolerations }} -{{ toYaml .Values.calico.typha_autoscaler.tolerations | indent 10 }} - {{- end }} + {{- if .Values.calico.typha_autoscaler.tolerations }} + {{- toYaml .Values.calico.typha_autoscaler.tolerations | nindent 8 }} + {{- end }} containers: - image: "{{ .Values.autoscaler.image }}:{{ .Values.autoscaler.tag }}" name: autoscaler diff --git a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml index 7caa7fa4..64538d7d 100644 --- a/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/templates/rbac.yaml @@ -15,6 +15,14 @@ rules: - configmaps verbs: - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list - apiGroups: [""] resources: - endpoints diff --git a/lib/aws/bootstrap/charts/aws-calico/values.yaml b/lib/aws/bootstrap/charts/aws-calico/values.yaml index c192e92e..26c1da48 100644 --- a/lib/aws/bootstrap/charts/aws-calico/values.yaml +++ b/lib/aws/bootstrap/charts/aws-calico/values.yaml @@ -7,7 +7,7 @@ podSecurityPolicy: create: false calico: - tag: v3.13.4 + tag: v3.19.1 typha: logseverity: Info #Debug, Info, Warning, Error, Fatal @@ -22,6 +22,8 @@ calico: tolerations: [] nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} node: logseverity: Info #Debug, Info, Warning, Error, Fatal image: quay.io/calico/node @@ -37,6 +39,8 @@ calico: # value: 'some value' nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} typha_autoscaler: resources: requests: @@ -48,7 +52,9 @@ calico: tolerations: [] nodeSelector: beta.kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} autoscaler: - tag: "1.7.1" - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.8.3" + image: 
k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64 diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore index 50af0317..69a52314 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/.helmignore @@ -20,3 +20,4 @@ .idea/ *.tmproj .vscode/ +example-values*.yaml diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml index 47ed2baa..9de84506 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/Chart.yaml @@ -1,27 +1,25 @@ -apiVersion: v1 -appVersion: 1.5.0 -description: A Helm chart for the AWS Node Termination Handler +apiVersion: v2 +appVersion: 1.14.1 +description: A Helm chart for the AWS Node Termination Handler. home: https://github.com/aws/eks-charts icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png keywords: +- aws - eks - ec2 - node-termination - spot +kubeVersion: '>= 1.16-0' maintainers: -- email: nckturner@users.noreply.github.com - name: Nicholas Turner - url: https://github.com/nckturner -- email: stefanprodan@users.noreply.github.com - name: Stefan Prodan - url: https://github.com/stefanprodan +- email: bwagner5@users.noreply.github.com + name: Brandon Wagner + url: https://github.com/bwagner5 - email: jillmon@users.noreply.github.com - name: Jillian Montalvo + name: Jillian Kuentz url: https://github.com/jillmon -- email: mattrandallbecker@users.noreply.github.com - name: Matthew Becker - url: https://github.com/mattrandallbecker name: aws-node-termination-handler sources: -- https://github.com/aws/eks-charts -version: 0.8.0 +- https://github.com/aws/aws-node-termination-handler/ +- https://github.com/aws/eks-charts/ +type: application +version: 0.16.1 diff --git 
a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md index f1847304..48766925 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/README.md @@ -1,96 +1,170 @@ # AWS Node Termination Handler -AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler. +AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at [github.com/aws/aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler). ## Prerequisites -* Kubernetes >= 1.11 +- _Kubernetes_ >= v1.16 ## Installing the Chart -Add the EKS repository to Helm: -```sh -helm repo add eks https://aws.github.io/eks-charts -``` -Install AWS Node Termination Handler: -To install the chart with the release name aws-node-termination-handler and default configuration: +Before you can install the chart you will need to add the `aws` repo to [Helm](https://helm.sh/). -```sh -helm install --name aws-node-termination-handler \ - --namespace kube-system eks/aws-node-termination-handler +```shell +helm repo add eks https://aws.github.io/eks-charts/ ``` -To install into an EKS cluster where the Node Termination Handler is already installed, you can run: +After you've installed the repo you can install the chart, the following command will install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace. 
-```sh -helm upgrade --install --recreate-pods --force \ - aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler +```shell +helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler ``` -If you receive an error similar to `Error: release aws-node-termination-handler -failed: "aws-node-termination-handler" already exists`, simply rerun -the above command. +To install the chart on an EKS cluster where the AWS Node Termination Handler is already installed, you can run the following command. -The [configuration](#configuration) section lists the parameters that can be configured during installation. - -## Uninstalling the Chart - -To uninstall/delete the `aws-node-termination-handler` deployment: - -```sh -helm delete --purge aws-node-termination-handler +```shell +helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler --recreate-pods --force ``` -The command removes all the Kubernetes components associated with the chart and deletes the release. +If you receive an error similar to the one below simply rerun the above command. + +> Error: release aws-node-termination-handler failed: "aws-node-termination-handler" already exists + +To uninstall the `aws-node-termination-handler` chart installation from the `kube-system` namespace run the following command. + +```shell +helm delete --namespace kube-system aws-node-termination-handler +``` ## Configuration -The following tables lists the configurable parameters of the chart and their default values. +The following tables lists the configurable parameters of the chart and their default values. 
These values are split up into the [common configuration](#common-configuration) shared by all AWS Node Termination Handler modes, [queue configuration](#queue-processor-mode-configuration) used when AWS Node Termination Handler is in queue-processor mode, and [IMDS configuration](#imds-mode-configuration) used when AWS Node Termination Handler is in IMDS mode; for more information about the different modes see the project [README](https://github.com/aws/aws-node-termination-handler/blob/main/README.md). -Parameter | Description | Default --- | --- | --- -`image.repository` | image repository | `amazon/aws-node-termination-handler` -`image.tag` | image tag | `` -`image.pullPolicy` | image pull policy | `IfNotPresent` -`image.pullSecrets` | image pull secrets (for private docker registries) | `[]` -`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false` -`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` -`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` -`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` -`ignoreDaemonsSets` | Causes kubectl to skip daemon set managed pods | `true` -`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. 
| `http://169.254.169.254:80` -`webhookURL` | Posts event data to URL upon instance interruption action | `` -`webhookProxy` | Uses the specified HTTP(S) proxy for sending webhooks | `` -`webhookHeaders` | Replaces the default webhook headers. | `{"Content-type":"application/json"}` -`webhookTemplate` | Replaces the default webhook message template. | `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Description: {{ .Description }} - State: {{ .State }} - Start Time: {{ .StartTime }}"}` -`dryRun` | If true, only log if a node would be drained | `false` -`enableScheduledEventDraining` | [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event | `false` -`enableSpotInterruptionDraining` | If true, drain nodes when the spot interruption termination notice is received | `true` -`metadataTries` | The number of times to try requesting metadata. If you would like 2 retries, set metadata-tries to 3. | `3` -`cordonOnly` | If true, nodes will be cordoned but not drained when an interruption event occurs. | `false` -`taintNode` | If true, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance` and `aws-node-termination-handler/spot-itn` | `false` -`jsonLogging` | If true, use JSON-formatted logs instead of human readable logs. | `false` -`affinity` | node/pod affinities | None -`podAnnotations` | annotations to add to each pod | `{}` -`priorityClassName` | Name of the priorityClass | `system-node-critical` -`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi` -`dnsPolicy` | DaemonSet DNS policy | `ClusterFirstWithHostNet` -`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. 
Value must be a valid yaml expression. | `{}` -`tolerations` | list of node taints to tolerate | `[ {"operator": "Exists"} ]` -`rbac.create` | if `true`, create and use RBAC resources | `true` -`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` -`serviceAccount.create` | If `true`, create a new service account | `true` -`serviceAccount.name` | Service account to be used | None -`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` -`procUptimeFile` | (Used for Testing) Specify the uptime file | `/proc/uptime` -`securityContext.runAsUserID` | User ID to run the container | `1000` -`securityContext.runAsGroupID` | Group ID to run the container | `1000` -`nodeSelectorTermsOs` | Operating System Node Selector Key | `beta.kubernetes.io/os` -`nodeSelectorTermsArch` | CPU Architecture Node Selector Key | `beta.kubernetes.io/arch` -`enablePrometheusServer` | If true, start an http server exposing `/metrics` endpoint for prometheus. | `false` -`prometheusServerPort` | Replaces the default HTTP port for exposing prometheus metrics. | `9092` +### Common Configuration -## Metrics endpoint consideration -If prometheus server is enabled and since NTH is a daemonset with `host_networking=true`, nothing else will be able to bind to `:9092` (or the port configured) in the root network namespace -since it's listening on all interfaces. -Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. +The configuration in this table applies to all AWS Node Termination Handler modes. 
+ +| Parameter | Description | Default | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| `image.repository` | Image repository. | `public.ecr.aws/aws-ec2/aws-node-termination-handler` | +| `image.tag` | Image tag. | `v{{ .Chart.AppVersion}}` | +| `image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets. | `[]` | +| `nameOverride` | Override the `name` of the chart. | `""` | +| `fullnameOverride` | Override the `fullname` of the chart. | `""` | +| `serviceAccount.create` | If `true`, create a new service account. | `true` | +| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` | +| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` | +| `rbac.create` | If `true`, create the RBAC resources. | `true` | +| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `true` | +| `customLabels` | Labels to add to all resource metadata. | `{}` | +| `podLabels` | Labels to add to the pod. | `{}` | +| `podAnnotations` | Annotations to add to the pod. | `{}` | +| `podSecurityContext` | Security context for the pod. | _See values.yaml_ | +| `securityContext` | Security context for the _aws-node-termination-handler_ container. | _See values.yaml_ | +| `terminationGracePeriodSeconds` | The termination grace period for the pod. | `nil` | +| `resources` | Resource requests and limits for the _aws-node-termination-handler_ container. 
| `{}` | +| `nodeSelector` | Expressions to select a node by it's labels for pod assignment. In IMDS mode this has a higher priority than `daemonsetNodeSelector` (for backwards compatibility) but shouldn't be used. | `{}` | +| `affinity` | Affinity settings for pod assignment. In IMDS mode this has a higher priority than `daemonsetAffinity` (for backwards compatibility) but shouldn't be used. | `{}` | +| `tolerations` | Tolerations for pod assignment. In IMDS mode this has a higher priority than `daemonsetTolerations` (for backwards compatibility) but shouldn't be used. | `[]` | +| `extraEnv` | Additional environment variables for the _aws-node-termination-handler_ container. | `[]` | +| `probes` | The Kubernetes liveness probe configuration. | _See values.yaml_ | +| `logLevel` | Sets the log level (`info`,`debug`, or `error`) | `info` | +| `jsonLogging` | If `true`, use JSON-formatted logs instead of human readable logs. | `false` | +| `enablePrometheusServer` | If `true`, start an http server exposing `/metrics` endpoint for _Prometheus_. | `false` | +| `prometheusServerPort` | Replaces the default HTTP port for exposing _Prometheus_ metrics. | `9092` | +| `dryRun` | If `true`, only log if a node would be drained. | `false` | +| `cordonOnly` | If `true`, nodes will be cordoned but not drained when an interruption event occurs. | `false` | +| `taintNode` | If `true`, nodes will be tainted when an interruption event occurs. Currently used taint keys are `aws-node-termination-handler/scheduled-maintenance`, `aws-node-termination-handler/spot-itn`, `aws-node-termination-handler/asg-lifecycle-termination` and `aws-node-termination-handler/rebalance-recommendation`. | `false` | +| `deleteLocalData` | If `true`, continue even if there are pods using local data that will be deleted when the node is drained. | `true` | +| `ignoreDaemonSets` | If `true`, skip terminating daemon set managed pods. 
| `true` | +| `podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used, which defaults to 30 seconds if not specified for the pod. | `-1` | +| `nodeTerminationGracePeriod` | Period of time in seconds given to each node to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` | +| `emitKubernetesEvents` | If `true`, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event. More information [here](https://github.com/aws/aws-node-termination-handler/blob/main/docs/kubernetes_events.md). | `false` | +| `kubernetesEventsExtraAnnotations` | A comma-separated list of `key=value` extra annotations to attach to all emitted Kubernetes events (e.g. `first=annotation,sample.annotation/number=two"`). | `""` | +| `webhookURL` | Posts event data to URL upon instance interruption action. | `""` | +| `webhookURLSecretName` | Pass the webhook URL as a Secret using the key `webhookurl`. | `""` | +| `webhookHeaders` | Replace the default webhook headers (e.g. `{"Content-type":"application/json"}`). | `""` | +| `webhookProxy` | Uses the specified HTTP(S) proxy for sending webhook data. | `""` | +| `webhookTemplate` | Replaces the default webhook message template (e.g. `{"text":"[NTH][Instance Interruption] EventID: {{ .EventID }} - Kind: {{ .Kind }} - Instance: {{ .InstanceID }} - Node: {{ .NodeName }} - Description: {{ .Description }} - Start Time: {{ .StartTime }}"}`). | `""` | +| `webhookTemplateConfigMapName` | Pass the webhook template file as a configmap. | "``" | +| `webhookTemplateConfigMapKey` | Name of the Configmap key storing the template file. 
| `""` | +| `enableSqsTerminationDraining` | If `true`, this turns on queue-processor mode which drains nodes when an SQS termination event is received. | `false` | + +### Queue-Processor Mode Configuration + +The configuration in this table applies to AWS Node Termination Handler in queue-processor mode. + +| Parameter | Description | Default | +| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | +| `replicas` | The number of replicas in the deployment when using queue-processor mode (NOTE: increasing replicas may cause duplicate webhooks since pods are stateless). | `1` | +| `strategy` | Specify the update strategy for the deployment. | `{}` | +| `podDisruptionBudget` | Limit the disruption for controller pods, requires at least 2 controller replicas. | `{}` | +| `serviceMonitor.create` | If `true`, create a ServiceMonitor. This requires `enablePrometheusServer: true`. | `false` | +| `serviceMonitor.namespace` | Override ServiceMonitor _Helm_ release namespace. | `nil` | +| `serviceMonitor.labels` | Additional ServiceMonitor metadata labels. | `{}` | +| `serviceMonitor.interval` | _Prometheus_ scrape interval. | `30s` | +| `serviceMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` | +| `priorityClassName` | Name of the PriorityClass to use for the Deployment. | `system-cluster-critical` | +| `awsRegion` | If specified, use the AWS region for AWS API calls, else NTH will try to find the region through the `AWS_REGION` environment variable, IMDS, or the specified queue URL. | `""` | +| `queueURL` | Listens for messages on the specified SQS queue URL. | `""` | +| `workers` | The maximum amount of parallel event processors to handle concurrent events. 
| `10` | +| `checkASGTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. | `true` | +| `managedAsgTag` | The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` | +| `assumeAsgTagPropagation` | If `true`, assume that ASG tags will be appear on the ASG's instances. | `false` | + +### IMDS Mode Configuration + +The configuration in this table applies to AWS Node Termination Handler in IMDS mode. + +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `targetNodeOs` | Space separated list of node OS's to target (e.g. `"linux"`, `"windows"`, `"linux windows"`). Windows support is **EXPERIMENTAL**. | `"linux"` | +| `linuxPodLabels` | Labels to add to each Linux pod. | `{}` | +| `windowsPodLabels` | Labels to add to each Windows pod. | `{}` | +| `linuxPodAnnotations` | Annotations to add to each Linux pod. | `{}` | +| `windowsPodAnnotations` | Annotations to add to each Windows pod. | `{}` | +| `updateStrategy` | Update strategy for the all DaemonSets. | _See values.yaml_ | +| `daemonsetPriorityClassName` | Name of the PriorityClass to use for all DaemonSets. | `system-node-critical` | +| `podMonitor.create` | If `true`, create a PodMonitor. This requires `enablePrometheusServer: true`. | `false` | +| `podMonitor.namespace` | Override PodMonitor _Helm_ release namespace. | `nil` | +| `podMonitor.labels` | Additional PodMonitor metadata labels | `{}` | +| `podMonitor.interval` | _Prometheus_ scrape interval. | `30s` | +| `podMonitor.sampleLimit` | Number of scraped samples accepted. 
| `5000` |
+| `useHostNetwork` | If `true`, enables `hostNetwork` for the Linux DaemonSet. NOTE: setting this to `false` may cause issues accessing IMDSv2 if your account is not configured with an IP hop count of 2 see [Metrics Endpoint Considerations](#metrics-endpoint-considerations) | `true` |
+| `dnsPolicy` | If specified, this overrides `linuxDnsPolicy` and `windowsDnsPolicy` with a single policy. | `""` |
+| `linuxDnsPolicy` | DNS policy for the Linux DaemonSet. | `""` |
+| `windowsDnsPolicy` | DNS policy for the Windows DaemonSet. | `""` |
+| `daemonsetNodeSelector` | Expressions to select a node by its labels for DaemonSet pod assignment. For backwards compatibility the `nodeSelector` value has priority over this but shouldn't be used. | `{}` |
+| `linuxNodeSelector` | Override `daemonsetNodeSelector` for the Linux DaemonSet. | `{}` |
+| `windowsNodeSelector` | Override `daemonsetNodeSelector` for the Windows DaemonSet. | `{}` |
+| `daemonsetAffinity` | Affinity settings for DaemonSet pod assignment. For backwards compatibility the `affinity` has priority over this but shouldn't be used. | `{}` |
+| `linuxAffinity` | Override `daemonsetAffinity` for the Linux DaemonSet. | `{}` |
+| `windowsAffinity` | Override `daemonsetAffinity` for the Windows DaemonSet. | `{}` |
+| `daemonsetTolerations` | Tolerations for DaemonSet pod assignment. For backwards compatibility the `tolerations` has priority over this but shouldn't be used. | `[]` |
+| `linuxTolerations` | Override `daemonsetTolerations` for the Linux DaemonSet. | `[]` |
+| `windowsTolerations` | Override `daemonsetTolerations` for the Windows DaemonSet. | `[]` |
+| `enableProbesServer` | If `true`, start an http server exposing `/healthz` endpoint for probes. | `false` |
+| `metadataTries` | The number of times to try requesting metadata. | `3` |
+| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. 
| `true` |
+| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. This is **EXPERIMENTAL**. | `false` |
+| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. | `false` |
+| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. | `false` |
+
+### Testing Configuration
+
+The configuration in this table applies to AWS Node Termination Handler testing and is **NOT RECOMMENDED** FOR PRODUCTION DEPLOYMENTS.
+
+| Parameter | Description | Default |
+| --------------------- | --------------------------------------------------------------------------------- | -------------- |
+| `awsEndpoint` | (Used for testing) If specified, use the provided AWS endpoint to make API calls. | `""` |
+| `awsSecretAccessKey` | (Used for testing) Pass-thru environment variable. | `nil` |
+| `awsAccessKeyID` | (Used for testing) Pass-thru environment variable. | `nil` |
+| `instanceMetadataURL` | (Used for testing) If specified, use the provided metadata URL. | `""` |
+| `procUptimeFile` | (Used for testing) Specify the uptime file. | `/proc/uptime` |
+
+## Metrics Endpoint Considerations
+
+AWS Node Termination Handler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint. 
+
+You can switch NTH in IMDS mode to run with `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html)).
diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl
index 902844a7..45f06f4b 100644
--- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl
+++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/_helpers.tpl
@@ -1,4 +1,5 @@
 {{/* vim: set filetype=mustache: */}}
+
 {{/*
 Expand the name of the chart.
 */}}
@@ -25,17 +26,11 @@ If release name contains chart name it will be used as a full name.
 {{- end -}}
 
 {{/*
-Common labels
+Equivalent to "aws-node-termination-handler.fullname" except that "-win" indicator is appended to the end.
+Name will not exceed 63 characters.
 */}}
-{{- define "aws-node-termination-handler.labels" -}}
-app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }}
-helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-k8s-app: aws-node-termination-handler
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- define "aws-node-termination-handler.fullnameWindows" -}}
+{{- include "aws-node-termination-handler.fullname" . | trunc 59 | trimSuffix "-" | printf "%s-win" -}}
 {{- end -}}
 
 {{/*
@@ -45,6 +40,47 @@ Create chart name and version as used by the chart label.
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
 {{- end -}}
 
+{{/*
+Common labels
+*/}}
+{{- define "aws-node-termination-handler.labels" -}}
+{{ include "aws-node-termination-handler.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/component: {{ .Release.Name }} +app.kubernetes.io/part-of: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }} +{{- with .Values.customLabels }} +{{ toYaml . }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "aws-node-termination-handler.selectorLabels" -}} +app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Selector labels for the deployment +*/}} +{{- define "aws-node-termination-handler.selectorLabelsDeployment" -}} +{{ include "aws-node-termination-handler.selectorLabels" . }} +app.kubernetes.io/component: deployment +{{- end -}} + +{{/* +Selector labels for the daemonset +*/}} +{{- define "aws-node-termination-handler.selectorLabelsDaemonset" -}} +{{ include "aws-node-termination-handler.selectorLabels" . 
}} +app.kubernetes.io/component: daemonset +{{- end -}} + {{/* Create the name of the service account to use */}} @@ -55,3 +91,19 @@ Create the name of the service account to use {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} + +{{/* +The image to use +*/}} +{{- define "aws-node-termination-handler.image" -}} +{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} + +{{/* Get PodDisruptionBudget API Version */}} +{{- define "aws-node-termination-handler.pdb.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml index dc800866..43c2b030 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrole.yaml @@ -1,7 +1,10 @@ +{{- if .Values.rbac.create -}} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . 
| nindent 4 }} rules: - apiGroups: - "" @@ -9,6 +12,7 @@ rules: - nodes verbs: - get + - list - patch - update - apiGroups: @@ -17,6 +21,7 @@ rules: - pods verbs: - list + - get - apiGroups: - "" resources: @@ -35,3 +40,13 @@ rules: - daemonsets verbs: - get +{{- if .Values.emitKubernetesEvents }} +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml index b5c25327..1058df1b 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/clusterrolebinding.yaml @@ -1,12 +1,16 @@ +{{- if .Values.rbac.create -}} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: {{ include "aws-node-termination-handler.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} roleRef: + apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: {{ include "aws-node-termination-handler.fullname" . }} - apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml new file mode 100644 index 00000000..199879c3 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.linux.yaml @@ -0,0 +1,198 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "linux") -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} + kubernetes.io/os: linux + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }} + kubernetes.io/os: linux + k8s-app: aws-node-termination-handler + {{- with (mergeOverwrite (dict) .Values.podLabels .Values.linuxPodLabels) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.linuxPodAnnotations }} + annotations: + {{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.linuxPodAnnotations) | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.daemonsetPriorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . 
}} + {{- end }} + hostNetwork: {{ .Values.useHostNetwork }} + dnsPolicy: {{ default .Values.linuxDnsPolicy .Values.dnsPolicy }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: {{ .Values.enableProbesServer | quote }} + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + {{- with .Values.instanceMetadataURL }} + - name: INSTANCE_METADATA_URL + value: {{ . 
| quote }} + {{- end }} + - name: METADATA_TRIES + value: {{ .Values.metadataTries | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . | quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . 
| quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: {{ .Values.enableSpotInterruptionDraining | quote }} + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: {{ .Values.enableScheduledEventDraining | quote }} + - name: ENABLE_REBALANCE_MONITORING + value: {{ .Values.enableRebalanceMonitoring | quote }} + - name: ENABLE_REBALANCE_DRAINING + value: {{ .Values.enableRebalanceDraining | quote }} + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "false" + - name: UPTIME_FROM_FILE + value: {{ .Values.procUptimeFile | quote }} + {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }} + ports: + {{- if .Values.enableProbesServer }} + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + {{- end }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + {{- end }} + {{- end }} + {{- if .Values.enableProbesServer }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: uptime + mountPath: {{ .Values.procUptimeFile }} + readOnly: true + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: webhook-template + mountPath: /config/ + {{- end }} + volumes: + - name: uptime + hostPath: + path: {{ .Values.procUptimeFile | default "/proc/uptime" }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.linuxNodeSelector) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.linuxAffinity) }} + affinity: + {{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.linuxAffinity)) | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.linuxTolerations) }} + tolerations: + {{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.linuxTolerations )) | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml new file mode 100644 index 00000000..ea7f8337 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.windows.yaml @@ -0,0 +1,192 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (lower .Values.targetNodeOs | contains "windows") -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullnameWindows" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . 
| nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} + kubernetes.io/os: windows + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 8 }} + kubernetes.io/os: windows + k8s-app: aws-node-termination-handler + {{- with (mergeOverwrite (dict) .Values.podLabels .Values.windowsPodLabels) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations .Values.windowsPodAnnotations }} + annotations: + {{- toYaml (mergeOverwrite (dict) .Values.podAnnotations .Values.windowsPodAnnotations) | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.daemonsetPriorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + hostNetwork: false + dnsPolicy: {{ default .Values.windowsDnsPolicy .Values.dnsPolicy }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: {{ .Values.enableProbesServer | quote }} + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + {{- with .Values.instanceMetadataURL }} + - name: INSTANCE_METADATA_URL + value: {{ . | quote }} + {{- end }} + - name: METADATA_TRIES + value: {{ .Values.metadataTries | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . 
| quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . | quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: {{ .Values.enableSpotInterruptionDraining | quote }} + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: {{ .Values.enableScheduledEventDraining | quote }} + - name: ENABLE_REBALANCE_MONITORING + value: {{ .Values.enableRebalanceMonitoring | quote }} + - name: ENABLE_REBALANCE_DRAINING + value: {{ .Values.enableRebalanceDraining | quote }} + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "false" + {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }} + ports: + {{- if .Values.enableProbesServer }} + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + hostPort: {{ .Values.probes.httpGet.port }} + {{- end }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + hostPort: {{ .Values.prometheusServerPort }} + {{- end }} + {{- end }} + {{- if .Values.enableProbesServer }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumeMounts: + - name: webhook-template + mountPath: /config/ + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumes: + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: windows + {{- with default .Values.daemonsetNodeSelector (default .Values.nodeSelector .Values.windowsNodeSelector) }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetAffinity (or .Values.affinity .Values.windowsAffinity) }} + affinity: + {{- toYaml (default .Values.daemonsetAffinity (default .Values.affinity .Values.windowsAffinity )) | nindent 8 }} + {{- end }} + {{- if or .Values.daemonsetTolerations (or .Values.tolerations .Values.windowsTolerations) }} + tolerations: + {{- toYaml (default .Values.daemonsetTolerations (default .Values.tolerations .Values.windowsTolerations )) | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml deleted file mode 100644 index fb220022..00000000 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/daemonset.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ include "aws-node-termination-handler.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} -spec: - updateStrategy: -{{ toYaml .Values.updateStrategy | indent 4 }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - {{- if .Values.podAnnotations }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - labels: - app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - k8s-app: aws-node-termination-handler - spec: - volumes: - - name: "uptime" - hostPath: - path: "{{ .Values.procUptimeFile }}" - priorityClassName: "{{ .Values.priorityClassName }}" - affinity: - nodeAffinity: - # NOTE(jaypipes): Change when we complete - # https://github.com/aws/aws-node-termination-handler/issues/8 - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .Values.nodeSelectorTermsOs | default "beta.kubernetes.io/os" | quote }} - operator: In - values: - - linux - - key: {{ .Values.nodeSelectorTermsArch | default "beta.kubernetes.io/arch" | quote }} - operator: In - values: - - amd64 - - arm - - arm64 - serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }} - hostNetwork: true - dnsPolicy: {{ .Values.dnsPolicy }} - containers: - - name: {{ include "aws-node-termination-handler.name" . 
}} - image: {{ .Values.image.repository}}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - securityContext: - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: {{ .Values.securityContext.runAsUserID }} - runAsGroup: {{ .Values.securityContext.runAsGroupID }} - allowPrivilegeEscalation: false - volumeMounts: - - name: "uptime" - mountPath: "/proc/uptime" - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SPOT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: DELETE_LOCAL_DATA - value: {{ .Values.deleteLocalData | quote }} - - name: IGNORE_DAEMON_SETS - value: {{ .Values.ignoreDaemonSets | quote }} - - name: GRACE_PERIOD - value: {{ .Values.gracePeriod | quote }} - - name: POD_TERMINATION_GRACE_PERIOD - value: {{ .Values.podTerminationGracePeriod | quote }} - - name: INSTANCE_METADATA_URL - value: {{ .Values.instanceMetadataURL | quote }} - - name: NODE_TERMINATION_GRACE_PERIOD - value: {{ .Values.nodeTerminationGracePeriod | quote }} - - name: WEBHOOK_URL - value: {{ .Values.webhookURL | quote }} - - name: WEBHOOK_HEADERS - value: {{ .Values.webhookHeaders | quote }} - - name: WEBHOOK_TEMPLATE - value: {{ .Values.webhookTemplate | quote }} - - name: DRY_RUN - value: {{ .Values.dryRun | quote }} - - name: ENABLE_SPOT_INTERRUPTION_DRAINING - value: {{ .Values.enableSpotInterruptionDraining | quote }} - - name: ENABLE_SCHEDULED_EVENT_DRAINING - value: {{ .Values.enableScheduledEventDraining | quote }} - - name: METADATA_TRIES - value: {{ .Values.metadataTries | quote }} - - name: CORDON_ONLY - value: {{ .Values.cordonOnly | quote }} - - name: TAINT_NODE - value: {{ .Values.taintNode | quote }} - - name: JSON_LOGGING - value: {{ .Values.jsonLogging | quote }} - - name: WEBHOOK_PROXY - value: {{ 
.Values.webhookProxy | quote }} - - name: ENABLE_PROMETHEUS_SERVER - value: {{ .Values.enablePrometheusServer | quote }} - - name: PROMETHEUS_SERVER_PORT - value: {{ .Values.prometheusServerPort | quote }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml new file mode 100644 index 00000000..38c10e98 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/deployment.yaml @@ -0,0 +1,202 @@ +{{- if .Values.enableSqsTerminationDraining }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicas }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} + template: + metadata: + labels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 8 }} + k8s-app: aws-node-termination-handler + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-node-termination-handler.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + containers: + - name: aws-node-termination-handler + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "aws-node-termination-handler.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: "true" + - name: PROBES_SERVER_PORT + value: {{ .Values.probes.httpGet.port | quote }} + - name: PROBES_SERVER_ENDPOINT + value: {{ .Values.probes.httpGet.path | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: JSON_LOGGING + value: {{ .Values.jsonLogging | quote }} + - name: ENABLE_PROMETHEUS_SERVER + value: {{ .Values.enablePrometheusServer | quote }} + - name: PROMETHEUS_SERVER_PORT + value: {{ .Values.prometheusServerPort | quote }} + - name: CHECK_ASG_TAG_BEFORE_DRAINING + value: {{ .Values.checkASGTagBeforeDraining | quote }} + - name: MANAGED_ASG_TAG + value: {{ .Values.managedAsgTag | quote }} + - name: ASSUME_ASG_TAG_PROPAGATION + value: {{ .Values.assumeAsgTagPropagation | quote }} + - name: DRY_RUN + value: {{ .Values.dryRun | quote }} + - name: CORDON_ONLY + value: {{ .Values.cordonOnly | quote }} + - name: TAINT_NODE + value: {{ .Values.taintNode | quote }} + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | 
quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + - name: EMIT_KUBERNETES_EVENTS + value: {{ .Values.emitKubernetesEvents | quote }} + {{- with .Values.kubernetesEventsExtraAnnotations }} + - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS + value: {{ . | quote }} + {{- end }} + {{- if or .Values.webhookURL .Values.webhookURLSecretName }} + - name: WEBHOOK_URL + {{- if .Values.webhookURLSecretName }} + valueFrom: + secretKeyRef: + name: {{ .Values.webhookURLSecretName }} + key: webhookurl + {{- else }} + value: {{ .Values.webhookURL | quote }} + {{- end }} + {{- end }} + {{- with .Values.webhookHeaders }} + - name: WEBHOOK_HEADERS + value: {{ . | quote }} + {{- end }} + {{- with .Values.webhookProxy }} + - name: WEBHOOK_PROXY + value: {{ . | quote }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + - name: WEBHOOK_TEMPLATE_FILE + value: {{ print "/config/" .Values.webhookTemplateConfigMapKey | quote }} + {{- else if .Values.webhookTemplate }} + - name: WEBHOOK_TEMPLATE + value: {{ .Values.webhookTemplate | quote }} + {{- end }} + - name: ENABLE_SPOT_INTERRUPTION_DRAINING + value: "false" + - name: ENABLE_SCHEDULED_EVENT_DRAINING + value: "false" + - name: ENABLE_REBALANCE_MONITORING + value: "false" + - name: ENABLE_REBALANCE_DRAINING + value: "false" + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "true" + {{- with .Values.awsRegion }} + - name: AWS_REGION + value: {{ . | quote }} + {{- end }} + {{- with .Values.awsEndpoint }} + - name: AWS_ENDPOINT + value: {{ . 
| quote }} + {{- end }} + {{- if and .Values.awsAccessKeyID .Values.awsSecretAccessKey }} + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.awsAccessKeyID | quote }} + - name: AWS_SECRET_ACCESS_KEY + value: {{ .Values.awsSecretAccessKey | quote }} + {{- end }} + - name: QUEUE_URL + value: {{ .Values.queueURL | quote }} + - name: WORKERS + value: {{ .Values.workers | quote }} + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: liveness-probe + protocol: TCP + containerPort: {{ .Values.probes.httpGet.port }} + {{- if .Values.enablePrometheusServer }} + - name: http-metrics + protocol: TCP + containerPort: {{ .Values.prometheusServerPort }} + {{- end }} + livenessProbe: + {{- toYaml .Values.probes | nindent 12 }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumeMounts: + - name: webhook-template + mountPath: /config/ + {{- end }} + {{- if and .Values.webhookTemplateConfigMapName .Values.webhookTemplateConfigMapKey }} + volumes: + - name: webhook-template + configMap: + name: {{ .Values.webhookTemplateConfigMapName }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml new file mode 100644 index 00000000..a2564fc5 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.enableSqsTerminationDraining (and .Values.podDisruptionBudget (gt (int .Values.replicas) 1)) }} +apiVersion: {{ include "aws-node-termination-handler.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml new file mode 100644 index 00000000..bbcbd9b4 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/podmonitor.yaml @@ -0,0 +1,31 @@ +{{- if and (not .Values.enableSqsTerminationDraining) (and .Values.enablePrometheusServer .Values.podMonitor.create) -}} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }} + {{- if .Values.podMonitor.namespace }} + namespace: {{ .Values.podMonitor.namespace }} + {{- end }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.podMonitor.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + jobLabel: app.kubernetes.io/name + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + podMetricsEndpoints: + - port: http-metrics + path: /metrics + {{- with .Values.podMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.podMonitor.sampleLimit }} + sampleLimit: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDaemonset" . | nindent 6 }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml index 0eda5002..e0034c1f 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/psp.yaml @@ -4,14 +4,25 @@ kind: PodSecurityPolicy metadata: name: {{ template "aws-node-termination-handler.fullname" . }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' spec: privileged: false hostIPC: false - hostNetwork: true + hostNetwork: {{ .Values.useHostNetwork }} hostPID: false +{{- if and (and (not .Values.enableSqsTerminationDraining) .Values.useHostNetwork ) (or .Values.enablePrometheusServer .Values.enableProbesServer) }} + hostPorts: +{{- if .Values.enablePrometheusServer }} + - min: {{ .Values.prometheusServerPort }} + max: {{ .Values.prometheusServerPort }} +{{- end }} +{{- if .Values.enableProbesServer }} + - min: {{ .Values.probesServerPort }} + max: {{ .Values.probesServerPort }} +{{- end }} +{{- end }} readOnlyRootFilesystem: false allowPrivilegeEscalation: false allowedCapabilities: @@ -27,12 +38,13 @@ spec: volumes: - '*' --- -kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: name: {{ template "aws-node-termination-handler.fullname" . 
}}-psp + namespace: {{ .Release.Namespace }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} rules: - apiGroups: ['policy'] resources: ['podsecuritypolicies'] @@ -44,11 +56,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "aws-node-termination-handler.fullname" . }}-psp + namespace: {{ .Release.Namespace }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole + kind: Role name: {{ template "aws-node-termination-handler.fullname" . }}-psp subjects: - kind: ServiceAccount diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml new file mode 100644 index 00000000..869e2606 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/service.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.enableSqsTerminationDraining .Values.enablePrometheusServer -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . 
| nindent 4 }} + ports: + - name: http-metrics + port: {{ .Values.prometheusServerPort }} + targetPort: http-metrics + protocol: TCP +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml index 55f2d766..a83276d6 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/serviceaccount.yaml @@ -3,11 +3,10 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "aws-node-termination-handler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} labels: -{{ include "aws-node-termination-handler.labels" . | indent 4 }} + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} {{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml new file mode 100644 index 00000000..caee5051 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/templates/servicemonitor.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.enableSqsTerminationDraining (and .Values.enablePrometheusServer .Values.serviceMonitor.create) -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "aws-node-termination-handler.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + jobLabel: app.kubernetes.io/name + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http-metrics + path: /metrics + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.sampleLimit }} + sampleLimit: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "aws-node-termination-handler.selectorLabelsDeployment" . | nindent 6 }} +{{- end -}} diff --git a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml index 469a51e4..a49c7d62 100644 --- a/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml +++ b/lib/aws/bootstrap/charts/aws-node-termination-handler/values.yaml @@ -3,100 +3,277 @@ # Declare variables to be passed into your templates. image: - repository: amazon/aws-node-termination-handler - tag: v1.5.0 + repository: public.ecr.aws/aws-ec2/aws-node-termination-handler + # Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }} + tag: "" pullPolicy: IfNotPresent pullSecrets: [] -securityContext: - runAsUserID: 1000 - runAsGroupID: 1000 - nameOverride: "" fullnameOverride: "" -priorityClassName: system-node-critical +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
If name is not set and create is true, a name is generated using fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Specifies if PodSecurityPolicy resources should be created + pspEnabled: true + +customLabels: {} + +podLabels: {} podAnnotations: {} -resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" +podSecurityContext: + fsGroup: 1000 -## enableSpotInterruptionDraining If true, drain nodes when the spot interruption termination notice is receieved +securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + runAsUser: 1000 + runAsGroup: 1000 -## enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event +terminationGracePeriodSeconds: -taintNode: false +resources: {} -## dryRun tells node-termination-handler to only log calls to kubernetes control plane +nodeSelector: {} + +affinity: {} + +tolerations: [] + +# Extra environment variables +extraEnv: [] + +# Liveness probe settings +probes: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + +# Set the log level +logLevel: info + +# Log messages in JSON format +jsonLogging: false + +enablePrometheusServer: false +prometheusServerPort: 9092 + +# dryRun tells node-termination-handler to only log calls to kubernetes control plane dryRun: false +# Cordon but do not drain nodes upon spot interruption termination notice. +cordonOnly: false + +# Taint node upon spot interruption termination notice. +taintNode: false + # deleteLocalData tells kubectl to continue even if there are pods using # emptyDir (local data that will be deleted when the node is drained). 
-deleteLocalData: "" +deleteLocalData: true # ignoreDaemonSets causes kubectl to skip Daemon Set managed pods. -ignoreDaemonSets: "" +ignoreDaemonSets: true -# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully. -# If negative, the default value specified in the pod will be used. -gracePeriod: "" -podTerminationGracePeriod: "" +# podTerminationGracePeriod is time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. +podTerminationGracePeriod: -1 # nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. -nodeTerminationGracePeriod: "" +nodeTerminationGracePeriod: 120 + +# emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event +emitKubernetesEvents: false + +# kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events +# Example: "first=annotation,sample.annotation/number=two" +kubernetesEventsExtraAnnotations: "" # webhookURL if specified, posts event data to URL upon instance interruption action. webhookURL: "" -# webhookProxy if specified, uses this HTTP(S) proxy configuration. -webhookProxy: "" +# Webhook URL will be fetched from the secret store using the given name. +webhookURLSecretName: "" # webhookHeaders if specified, replaces the default webhook headers. webhookHeaders: "" +# webhookProxy if specified, uses this HTTP(S) proxy configuration. 
+webhookProxy: "" + # webhookTemplate if specified, replaces the default webhook message template. webhookTemplate: "" -# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80) +# webhook template file will be fetched from given config map name +# if specified, replaces the default webhook message with the content of the template file +webhookTemplateConfigMapName: "" + +# template file name stored in configmap +webhookTemplateConfigMapKey: "" + +# enableSqsTerminationDraining If true, this turns on queue-processor mode which drains nodes when an SQS termination event is received +enableSqsTerminationDraining: false + +# --------------------------------------------------------------------------------------------------------------------- +# Queue Processor Mode +# --------------------------------------------------------------------------------------------------------------------- + +# The number of replicas in the NTH deployment when using queue-processor mode (NOTE: increasing this may cause duplicate webhooks since NTH pods are stateless) +replicas: 1 + +# Specify the update strategy for the deployment +strategy: {} + +# podDisruptionBudget specifies the disruption budget for the controller pods. 
+# Disruption budget will be configured only when the replicas value is greater than 1 +podDisruptionBudget: {} +# maxUnavailable: 1 + +serviceMonitor: + # Specifies whether ServiceMonitor should be created + # this needs enableSqsTerminationDraining: true + # and enablePrometheusServer: true + create: false + # Specifies whether the ServiceMonitor should be created in a different namespace than + # the Helm release + namespace: + # Additional labels to add to the metadata + labels: {} + # The Prometheus scrape interval + interval: 30s + # The number of scraped samples that will be accepted + sampleLimit: 5000 + +priorityClassName: system-cluster-critical + +# If specified, use the AWS region for AWS API calls +awsRegion: "" + +# Listens for messages on the specified SQS queue URL +queueURL: "" + +# The maximum amount of parallel event processors to handle concurrent events +workers: 10 + +# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node +checkASGTagBeforeDraining: true + +# The tag to ensure is on a node if checkASGTagBeforeDraining is true +managedAsgTag: "aws-node-termination-handler/managed" + +# If true, assume that ASG tags will appear on the ASG's instances +assumeAsgTagPropagation: false + +# --------------------------------------------------------------------------------------------------------------------- +# IMDS Mode +# --------------------------------------------------------------------------------------------------------------------- + +# Create node OS specific daemonset(s). (e.g. "linux", "windows", "linux windows") +targetNodeOs: linux + +linuxPodLabels: {} +windowsPodLabels: {} + +linuxPodAnnotations: {} +windowsPodAnnotations: {} + +# K8s DaemonSet update strategy. 
+updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + +daemonsetPriorityClassName: system-node-critical + +podMonitor: + # Specifies whether PodMonitor should be created + # this needs enableSqsTerminationDraining: false + # and enablePrometheusServer: true + create: false + # Specifies whether the PodMonitor should be created in a different namespace than + # the Helm release + namespace: + # Additional labels to add to the metadata + labels: {} + # The Prometheus scrape interval + interval: 30s + # The number of scraped samples that will be accepted + sampleLimit: 5000 + +# Determines if NTH uses host networking for Linux when running the DaemonSet (only IMDS mode; queue-processor never runs with host networking) +# If you have disabled IMDSv1 and are relying on IMDSv2, you'll need to increase the IP hop count to 2 before switching this to false +# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html +useHostNetwork: true + +# Daemonset DNS policy +dnsPolicy: "" +linuxDnsPolicy: ClusterFirstWithHostNet +windowsDnsPolicy: ClusterFirst + +daemonsetNodeSelector: {} +linuxNodeSelector: {} +windowsNodeSelector: {} + +daemonsetAffinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate +linuxAffinity: {} +windowsAffinity: {} + +daemonsetTolerations: + - operator: Exists +linuxTolerations: [] +windowsTolerations: [] + +# If the probes server is running for the Daemonset +enableProbesServer: false + +# Total number of times to try making the metadata request before failing. 
+metadataTries: 3 + +# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received +enableSpotInterruptionDraining: true + +# enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event +enableScheduledEventDraining: false + +# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received +enableRebalanceMonitoring: false + +# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received +enableRebalanceDraining: false + +# --------------------------------------------------------------------------------------------------------------------- +# Testing +# --------------------------------------------------------------------------------------------------------------------- + +# (TESTING USE): If specified, use the provided AWS endpoint to make API calls. +awsEndpoint: "" + +# (TESTING USE): These should only be used for testing w/ localstack! +awsAccessKeyID: +awsSecretAccessKey: + +# (TESTING USE): Override the default metadata URL (default: http://169.254.169.254:80) instanceMetadataURL: "" # (TESTING USE): Mount path for uptime file -procUptimeFile: "/proc/uptime" - -# nodeSelector tells the daemonset where to place the node-termination-handler -# pods. By default, this value is empty and every node will receive a pod. -nodeSelector: {} - -nodeSelectorTermsOs: "" -nodeSelectorTermsArch: "" - -enablePrometheusServer: false -prometheusServerPort: "9092" - -tolerations: - - operator: "Exists" - -affinity: {} - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. 
If namenot set and create is true, - # a name is generated using fullname template - name: - annotations: {} - # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME - -rbac: - # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created - pspEnabled: true - -dnsPolicy: "ClusterFirstWithHostNet" +procUptimeFile: /proc/uptime diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml index 2f572eb2..13d0cfd6 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.7.5 +appVersion: v1.10.2 description: A Helm chart for the AWS VPC CNI home: https://github.com/aws/amazon-vpc-cni-k8s icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png @@ -15,4 +15,4 @@ maintainers: name: aws-vpc-cni sources: - https://github.com/aws/amazon-vpc-cni-k8s -version: 1.1.3 +version: 1.1.13 diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md index 768f629d..24152152 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/README.md +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/README.md @@ -54,6 +54,7 @@ The following table lists the configurable parameters for this chart and their d | `nodeSelector` | Node labels for pod assignment | `{}` | | `podSecurityContext` | Pod Security Context | `{}` | | `podAnnotations` | annotations to add to each pod | `{}` | +| `podLabels` | Labels to add to each pod | `{}` | | `priorityClassName` | Name of the priorityClass | `system-node-critical` | | `resources` | Resources for the pods | `requests.cpu: 10m` | | `securityContext` | Container Security context | `capabilities: add: - "NET_ADMIN"` | @@ -65,6 +66,7 @@ The following table lists the configurable parameters for this chart and their d | `crd.create` | Specifies whether to create the VPC-CNI CRD | `true` | | `tolerations` | Optional 
deployment tolerations | `[]` | | `updateStrategy` | Optional update strategy | `type: RollingUpdate` | +| `cri.hostPath` | Optional use alternative container runtime | `nil` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a YAML file containing the values for the above parameters: diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml index 0635b5ed..f9db311a 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/clusterrole.yaml @@ -12,9 +12,12 @@ rules: verbs: ["list", "watch", "get"] - apiGroups: [""] resources: - - pods - namespaces verbs: ["list", "watch", "get"] + - apiGroups: [""] + resources: + - pods + verbs: ["list", "watch", "get", "patch"] - apiGroups: [""] resources: - nodes diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml index bdd29e7a..2385fb3a 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/customresourcedefinition.yaml @@ -1,5 +1,5 @@ {{- if .Values.crd.create -}} -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: eniconfigs.crd.k8s.amazonaws.com @@ -12,6 +12,10 @@ spec: - name: v1alpha1 served: true storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true names: plural: eniconfigs singular: eniconfig diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml index 10388ef0..8c31dd77 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/templates/daemonset.yaml 
@@ -14,6 +14,9 @@ spec: {{- else }} app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} + {{- end }} {{- end }} template: metadata: @@ -27,6 +30,9 @@ spec: app.kubernetes.io/name: {{ include "aws-vpc-cni.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} k8s-app: aws-node + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} spec: priorityClassName: "{{ .Values.priorityClassName }}" serviceAccountName: {{ template "aws-vpc-cni.serviceAccountName" . }} @@ -63,8 +69,10 @@ spec: name: metrics livenessProbe: {{ toYaml .Values.livenessProbe | indent 12 }} + timeoutSeconds: {{ .Values.livenessProbeTimeoutSeconds }} readinessProbe: {{ toYaml .Values.readinessProbe | indent 12 }} + timeoutSeconds: {{ .Values.readinessProbeTimeoutSeconds }} env: {{- range $key, $value := .Values.env }} - name: {{ $key }} @@ -92,8 +100,13 @@ spec: {{- end }} - mountPath: /host/var/log/aws-routed-eni name: log-dir +{{- if .Values.cri.hostPath }} + - mountPath: /var/run/cri.sock + name: cri +{{- else }} - mountPath: /var/run/dockershim.sock name: dockershim +{{- end }} - mountPath: /var/run/aws-node name: run-dir - mountPath: /run/xtables.lock @@ -110,9 +123,15 @@ spec: configMap: name: {{ include "aws-vpc-cni.fullname" . }} {{- end }} +{{- with .Values.cri.hostPath }} + - name: cri + hostPath: + {{- toYaml . | nindent 10 }} +{{- else }} - name: dockershim hostPath: path: /var/run/dockershim.sock +{{- end }} - name: log-dir hostPath: path: /var/log/aws-routed-eni diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml new file mode 100644 index 00000000..6c582a13 --- /dev/null +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/test.yaml @@ -0,0 +1,170 @@ +# Test values for aws-vpc-cni. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# This default name override is to maintain backwards compatibility with +# existing naming +nameOverride: aws-node + +init: + image: + tag: v1.10.2 + region: us-west-2 + account: "602401143452" + pullPolicy: Always + domain: "amazonaws.com" + # Set to use custom image + # override: "repo/org/image:tag" + env: + DISABLE_TCP_EARLY_DEMUX: "false" + ENABLE_IPv6: "false" + securityContext: + privileged: true + +image: + region: us-west-2 + tag: v1.10.2 + account: "602401143452" + domain: "amazonaws.com" + pullPolicy: Always + # Set to use custom image + # override: "repo/org/image:tag" + +# The CNI supports a number of environment variable settings +# See https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables +env: + ADDITIONAL_ENI_TAGS: "{}" + AWS_VPC_CNI_NODE_PORT_SUPPORT: "true" + AWS_VPC_ENI_MTU: "9001" + AWS_VPC_K8S_CNI_CONFIGURE_RPFILTER: "false" + AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG: "false" + AWS_VPC_K8S_CNI_EXTERNALSNAT: "false" + AWS_VPC_K8S_CNI_LOG_FILE: "/host/var/log/aws-routed-eni/ipamd.log" + AWS_VPC_K8S_CNI_LOGLEVEL: DEBUG + AWS_VPC_K8S_CNI_RANDOMIZESNAT: "prng" + AWS_VPC_K8S_CNI_VETHPREFIX: eni + AWS_VPC_K8S_PLUGIN_LOG_FILE: "/var/log/aws-routed-eni/plugin.log" + AWS_VPC_K8S_PLUGIN_LOG_LEVEL: DEBUG + DISABLE_INTROSPECTION: "false" + DISABLE_METRICS: "false" + ENABLE_POD_ENI: "false" + ENABLE_PREFIX_DELEGATION: "false" + WARM_ENI_TARGET: "1" + WARM_PREFIX_TARGET: "1" + DISABLE_NETWORK_RESOURCE_PROVISIONING: "false" + ENABLE_IPv4: "true" + ENABLE_IPv6: "false" + +# this flag enables you to use the match label that was present in the original daemonset deployed by EKS +# You can then annotate and label the original aws-node resources and 'adopt' them into a helm release +originalMatchLabels: false + +cniConfig: + enabled: false + fileContents: "" + +imagePullSecrets: [] + +fullnameOverride: "aws-node" + +priorityClassName: system-node-critical + +podSecurityContext: {} + +podAnnotations: {} + +podLabels: {} + +securityContext: + 
capabilities: + add: + - "NET_ADMIN" + +crd: + create: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +livenessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' + initialDelaySeconds: 60 + +livenessProbeTimeoutSeconds: 10 + +readinessProbe: + exec: + command: + - /app/grpc-health-probe + - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' + initialDelaySeconds: 1 + +readinessProbeTimeoutSeconds: 10 + +resources: + requests: + cpu: 10m + +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + +nodeSelector: {} + +tolerations: [] + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "kubernetes.io/os" + operator: In + values: + - linux + - key: "kubernetes.io/arch" + operator: In + values: + - amd64 + - arm64 + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate + +eniConfig: + # Specifies whether ENIConfigs should be created + create: false + region: us-west-2 + subnets: + # Key identifies the AZ + # Value contains the subnet ID and security group IDs within that AZ + # a: + # id: subnet-123 + # securityGroups: + # - sg-123 + # b: + # id: subnet-456 + # securityGroups: + # - sg-456 + # c: + # id: subnet-789 + # securityGroups: + # - sg-789 + +cri: + hostPath: # "/var/run/containerd/containerd.sock" \ No newline at end of file diff --git a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml index 84388b40..7917bdff 100644 --- a/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml +++ b/lib/aws/bootstrap/charts/aws-vpc-cni/values.yaml 
@@ -8,19 +8,24 @@ nameOverride: aws-node init: image: - tag: v1.7.5 + tag: v1.10.2 region: us-west-2 + account: "602401143452" pullPolicy: Always + domain: "amazonaws.com" # Set to use custom image # override: "repo/org/image:tag" env: DISABLE_TCP_EARLY_DEMUX: "false" + ENABLE_IPv6: "false" securityContext: privileged: true image: region: us-west-2 - tag: v1.7.5 + tag: v1.10.2 + account: "602401143452" + domain: "amazonaws.com" pullPolicy: Always # Set to use custom image # override: "repo/org/image:tag" @@ -43,13 +48,18 @@ env: DISABLE_INTROSPECTION: "false" DISABLE_METRICS: "false" ENABLE_POD_ENI: "false" + ENABLE_PREFIX_DELEGATION: "false" WARM_ENI_TARGET: "1" + WARM_PREFIX_TARGET: "1" + DISABLE_NETWORK_RESOURCE_PROVISIONING: "false" + ENABLE_IPv4: "true" + ENABLE_IPv6: "false" # this flag enables you to use the match label that was present in the original daemonset deployed by EKS # You can then annotate and label the original aws-node resources and 'adopt' them into a helm release originalMatchLabels: false -cniConfig: +cniConfig: enabled: false fileContents: "" @@ -63,6 +73,8 @@ podSecurityContext: {} podAnnotations: {} +podLabels: {} + securityContext: capabilities: add: @@ -85,15 +97,23 @@ livenessProbe: command: - /app/grpc-health-probe - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' initialDelaySeconds: 60 +livenessProbeTimeoutSeconds: 10 + readinessProbe: exec: command: - /app/grpc-health-probe - '-addr=:50051' + - '-connect-timeout=5s' + - '-rpc-timeout=5s' initialDelaySeconds: 1 +readinessProbeTimeoutSeconds: 10 + resources: requests: cpu: 10m @@ -111,20 +131,6 @@ affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - - matchExpressions: - - key: "beta.kubernetes.io/os" - operator: In - values: - - linux - - key: "beta.kubernetes.io/arch" - operator: In - values: - - amd64 - - arm64 - - key: "eks.amazonaws.com/compute-type" - operator: NotIn - values: - - fargate - matchExpressions: - key: 
"kubernetes.io/os" operator: In @@ -158,4 +164,7 @@ eniConfig: # c: # id: subnet-789 # securityGroups: - # - sg-789 \ No newline at end of file + # - sg-789 + +cri: + hostPath: # "/var/run/containerd/containerd.sock" \ No newline at end of file diff --git a/lib/common/bootstrap/charts/cert-manager/Chart.yaml b/lib/common/bootstrap/charts/cert-manager/Chart.yaml index b91d8ee1..01c64ecf 100644 --- a/lib/common/bootstrap/charts/cert-manager/Chart.yaml +++ b/lib/common/bootstrap/charts/cert-manager/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.1.1 +appVersion: v1.2.0 description: A Helm chart for cert-manager home: https://github.com/jetstack/cert-manager icon: https://raw.githubusercontent.com/jetstack/cert-manager/master/logo/logo.png @@ -14,4 +14,4 @@ maintainers: name: cert-manager sources: - https://github.com/jetstack/cert-manager -version: v1.1.1 +version: v1.2.0 diff --git a/lib/common/bootstrap/charts/cert-manager/README.md b/lib/common/bootstrap/charts/cert-manager/README.md index ec353bba..16e55a87 100644 --- a/lib/common/bootstrap/charts/cert-manager/README.md +++ b/lib/common/bootstrap/charts/cert-manager/README.md @@ -19,16 +19,9 @@ Before installing the chart, you must first install the cert-manager CustomResou This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources. ```bash -# Kubernetes 1.15+ -$ kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager.crds.yaml - -# Kubernetes <1.15 -$ kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager-legacy.crds.yaml +$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml ``` -> **Note**: If you're using a Kubernetes version below `v1.15` you will need to install the legacy version of the custom resource definitions. 
-> This version does not have API version conversion enabled and only supports `cert-manager.io/v1` API resources. - To install the chart with the release name `my-release`: ```console @@ -72,11 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als delete the previously installed CustomResourceDefinition resources: ```console -# Kubernetes 1.15+ -$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager.crds.yaml - -# Kubernetes <1.15 -$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.1.1/cert-manager-legacy.crds.yaml +$ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml ``` ## Configuration @@ -91,9 +80,12 @@ The following table lists the configurable parameters of the cert-manager chart | `global.podSecurityPolicy.enabled` | If `true`, create and use PodSecurityPolicy (includes sub-charts) | `false` | | `global.podSecurityPolicy.useAppArmor` | If `true`, use Apparmor seccomp profile in PSP | `true` | | `global.leaderElection.namespace` | Override the namespace used to store the ConfigMap for leader election | `kube-system` | +| `global.leaderElection.leaseDuration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate | | +| `global.leaderElection.renewDeadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration | | +| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | | | `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. 
If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` | | `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` | -| `image.tag` | Image tag | `v1.1.1` | +| `image.tag` | Image tag | `v1.2.0` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `replicaCount` | Number of cert-manager replicas | `1` | | `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod | @@ -148,7 +140,7 @@ The following table lists the configurable parameters of the cert-manager chart | `webhook.affinity` | Node affinity for webhook pod assignment | `{}` | | `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` | | `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` | -| `webhook.image.tag` | Webhook image tag | `v1.1.1` | +| `webhook.image.tag` | Webhook image tag | `v1.2.0` | | `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` | | `webhook.securePort` | The port that the webhook should listen on for requests. 
| `10250` | | `webhook.securityContext` | Security context for webhook pod assignment | `{}` | @@ -178,7 +170,7 @@ The following table lists the configurable parameters of the cert-manager chart | `cainjector.affinity` | Node affinity for cainjector pod assignment | `{}` | | `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` | | `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` | -| `cainjector.image.tag` | cainjector image tag | `v1.1.1` | +| `cainjector.image.tag` | cainjector image tag | `v1.2.0` | | `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` | | `cainjector.securityContext` | Security context for cainjector pod assignment | `{}` | | `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | `{}` | diff --git a/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml b/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml index 8ac6da01..8944fb4f 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/cainjector-deployment.yaml @@ -61,7 +61,18 @@ spec: {{- if .Values.global.logLevel }} - --v={{ .Values.global.logLevel }} {{- end }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- with .Values.global.leaderElection }} + - --leader-election-namespace={{ .namespace }} + {{- if .leaseDuration }} + - --leader-election-lease-duration={{ .leaseDuration }} + {{- end }} + {{- if .renewDeadline }} + - --leader-election-renew-deadline={{ .renewDeadline }} + {{- end }} + {{- if .retryPeriod }} + - --leader-election-retry-period={{ .retryPeriod }} + {{- end }} + {{- end }} {{- if .Values.cainjector.extraArgs }} {{ toYaml .Values.cainjector.extraArgs | indent 10 }} {{- end }} diff --git 
a/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml b/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml deleted file mode 100644 index 4026fcee..00000000 --- a/lib/common/bootstrap/charts/cert-manager/templates/crds.legacy.yaml +++ /dev/null @@ -1,6257 +0,0 @@ -{{- if (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) }} -{{- if .Values.installCRDs }} -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: certificaterequests.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: CertificateRequest - listKind: CertificateRequestList - plural: certificaterequests - shortNames: - - cr - - crs - singular: certificaterequest - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from - one of the configured issuers. 
\n All fields within the CertificateRequest's - `spec` are immutable after creation. A CertificateRequest will either succeed - or fail, as denoted by its `status.state` field. \n A CertificateRequest is - a 'one-shot' resource, meaning it represents a single point in time request - for a certificate and cannot be re-used." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the CertificateRequest resource. - properties: - duration: - description: The requested 'duration' (i.e. lifetime) of the Certificate. - This option may be ignored/overridden by some issuer types. - type: string - isCA: - description: IsCA will request to mark the certificate as valid for - certificate signing when submitting to the issuer. This will automatically - add the `cert sign` usage to the list of `usages`. - type: boolean - issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If - the 'kind' field is not set, or set to 'Issuer', an Issuer resource - with the given name in the same namespace as the CertificateRequest - will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer - with the provided name will be used. The 'name' field in this stanza - is required at all times. 
The group field refers to the API group - of the issuer which defaults to 'cert-manager.io' if empty. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - request: - description: The PEM-encoded x509 certificate signing request to be - submitted to the CA for signing. - format: byte - type: string - usages: - description: Usages is the set of x509 usages that are requested for - the certificate. If usages are set they SHOULD be encoded inside the - CSR spec Defaults to `digital signature` and `key encipherment` if - not specified. - items: - description: 'KeyUsage specifies valid usage contexts for keys. See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Valid KeyUsage values are as follows: "signing", "digital signature", - "content commitment", "key encipherment", "key agreement", "data - encipherment", "cert sign", "crl sign", "encipher only", "decipher - only", "any", "server auth", "client auth", "code signing", "email - protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec - user", "timestamping", "ocsp signing", "microsoft sgc", "netscape - sgc"' - enum: - - signing - - digital signature - - content commitment - - key encipherment - - key agreement - - data encipherment - - cert sign - - crl sign - - encipher only - - decipher only - - any - - server auth - - client auth - - code signing - - email protection - - s/mime - - ipsec end system - - ipsec tunnel - - ipsec user - - timestamping - - ocsp signing - - microsoft sgc - - netscape sgc - type: string - type: array - required: - - issuerRef - - request - type: object - status: - description: Status of the CertificateRequest. This is set and managed automatically. 
- properties: - ca: - description: The PEM encoded x509 certificate of the signer, also known - as the CA (Certificate Authority). This is set on a best-effort basis - by different issuers. If not set, the CA is assumed to be unknown/not - available. - format: byte - type: string - certificate: - description: The PEM encoded x509 certificate resulting from the certificate - signing request. If not set, the CertificateRequest has either not - been completed or has failed. More information on failure can be found - by checking the `conditions` field. - format: byte - type: string - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready` and `InvalidRequest`. - items: - description: CertificateRequestCondition contains condition information - for a CertificateRequest. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready', - 'InvalidRequest'). - type: string - required: - - status - - type - type: object - type: array - failureTime: - description: FailureTime stores the time that this CertificateRequest - failed. This is used to influence garbage collection and back-off. 
- format: date-time - type: string - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: certificates.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .spec.secretName - name: Secret - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: Certificate - listKind: CertificateList - plural: certificates - shortNames: - - cert - - certs - singular: certificate - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: "A Certificate resource should be created to ensure an up to date - and signed x509 certificate is stored in the Kubernetes Secret resource named - in `spec.secretName`. \n The stored certificate will be renewed before it - expires (as configured by `spec.renewBefore`)." 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the Certificate resource. - properties: - commonName: - description: 'CommonName is a common name to be used on the Certificate. - The CommonName should have a length of 64 characters or fewer to avoid - generating invalid CSRs. This value is ignored by TLS clients when - any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' - type: string - dnsNames: - description: DNSNames is a list of DNS subjectAltNames to be set on - the Certificate. - items: - type: string - type: array - duration: - description: The requested 'duration' (i.e. lifetime) of the Certificate. - This option may be ignored/overridden by some issuer types. If overridden - and `renewBefore` is greater than the actual certificate duration, - the certificate will be automatically renewed 2/3rds of the way through - the certificate's duration. - type: string - emailAddresses: - description: EmailAddresses is a list of email subjectAltNames to be - set on the Certificate. 
- items: - type: string - type: array - encodeUsagesInRequest: - description: EncodeUsagesInRequest controls whether key usages should - be present in the CertificateRequest - type: boolean - ipAddresses: - description: IPAddresses is a list of IP address subjectAltNames to - be set on the Certificate. - items: - type: string - type: array - isCA: - description: IsCA will mark this Certificate as valid for certificate - signing. This will automatically add the `cert sign` usage to the - list of `usages`. - type: boolean - issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. - If the 'kind' field is not set, or set to 'Issuer', an Issuer resource - with the given name in the same namespace as the Certificate will - be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer - with the provided name will be used. The 'name' field in this stanza - is required at all times. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - keystores: - description: Keystores configures additional keystore output formats - stored in the `secretName` Secret resource. - properties: - jks: - description: JKS configures options for storing a JKS keystore in - the `spec.secretName` Secret resource. - properties: - create: - description: Create enables JKS keystore creation for the Certificate. - If true, a file named `keystore.jks` will be created in the - target Secret resource, encrypted using the password stored - in `passwordSecretRef`. The keystore file will only be updated - upon re-issuance. - type: boolean - passwordSecretRef: - description: PasswordSecretRef is a reference to a key in a - Secret resource containing the password used to encrypt the - JKS keystore. 
- properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - create - - passwordSecretRef - type: object - pkcs12: - description: PKCS12 configures options for storing a PKCS12 keystore - in the `spec.secretName` Secret resource. - properties: - create: - description: Create enables PKCS12 keystore creation for the - Certificate. If true, a file named `keystore.p12` will be - created in the target Secret resource, encrypted using the - password stored in `passwordSecretRef`. The keystore file - will only be updated upon re-issuance. - type: boolean - passwordSecretRef: - description: PasswordSecretRef is a reference to a key in a - Secret resource containing the password used to encrypt the - PKCS12 keystore. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - create - - passwordSecretRef - type: object - type: object - privateKey: - description: Options to control private keys used for the Certificate. - properties: - algorithm: - description: Algorithm is the private key algorithm of the corresponding - private key for this certificate. 
If provided, allowed values - are either "rsa" or "ecdsa" If `algorithm` is specified and `size` - is not provided, key size of 256 will be used for "ecdsa" key - algorithm and key size of 2048 will be used for "rsa" key algorithm. - enum: - - RSA - - ECDSA - type: string - encoding: - description: The private key cryptography standards (PKCS) encoding - for this certificate's private key to be encoded in. If provided, - allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and - PKCS#8, respectively. Defaults to PKCS#1 if not specified. - enum: - - PKCS1 - - PKCS8 - type: string - rotationPolicy: - description: RotationPolicy controls how private keys should be - regenerated when a re-issuance is being processed. If set to Never, - a private key will only be generated if one does not already exist - in the target `spec.secretName`. If one does exists but it does - not have the correct algorithm or size, a warning will be raised - to await user intervention. If set to Always, a private key matching - the specified requirements will be generated whenever a re-issuance - occurs. Default is 'Never' for backward compatibility. - type: string - size: - description: Size is the key bit size of the corresponding private - key for this certificate. If `algorithm` is set to `RSA`, valid - values are `2048`, `4096` or `8192`, and will default to `2048` - if not specified. If `algorithm` is set to `ECDSA`, valid values - are `256`, `384` or `521`, and will default to `256` if not specified. - No other values are allowed. - type: integer - type: object - renewBefore: - description: The amount of time before the currently issued certificate's - `notAfter` time that cert-manager will begin to attempt to renew the - certificate. If this value is greater than the total duration of the - certificate (i.e. notAfter - notBefore), it will be automatically - renewed 2/3rds of the way through the certificate's duration. 
- type: string - secretName: - description: SecretName is the name of the secret resource that will - be automatically created and managed by this Certificate resource. - It will be populated with a private key and certificate, signed by - the denoted issuer. - type: string - subject: - description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). - properties: - countries: - description: Countries to be used on the Certificate. - items: - type: string - type: array - localities: - description: Cities to be used on the Certificate. - items: - type: string - type: array - organizationalUnits: - description: Organizational Units to be used on the Certificate. - items: - type: string - type: array - organizations: - description: Organizations to be used on the Certificate. - items: - type: string - type: array - postalCodes: - description: Postal codes to be used on the Certificate. - items: - type: string - type: array - provinces: - description: State/Provinces to be used on the Certificate. - items: - type: string - type: array - serialNumber: - description: Serial number to be used on the Certificate. - type: string - streetAddresses: - description: Street addresses to be used on the Certificate. - items: - type: string - type: array - type: object - uris: - description: URIs is a list of URI subjectAltNames to be set on the - Certificate. - items: - type: string - type: array - usages: - description: Usages is the set of x509 usages that are requested for - the certificate. Defaults to `digital signature` and `key encipherment` - if not specified. - items: - description: 'KeyUsage specifies valid usage contexts for keys. 
See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Valid KeyUsage values are as follows: "signing", "digital signature", - "content commitment", "key encipherment", "key agreement", "data - encipherment", "cert sign", "crl sign", "encipher only", "decipher - only", "any", "server auth", "client auth", "code signing", "email - protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec - user", "timestamping", "ocsp signing", "microsoft sgc", "netscape - sgc"' - enum: - - signing - - digital signature - - content commitment - - key encipherment - - key agreement - - data encipherment - - cert sign - - crl sign - - encipher only - - decipher only - - any - - server auth - - client auth - - code signing - - email protection - - s/mime - - ipsec end system - - ipsec tunnel - - ipsec user - - timestamping - - ocsp signing - - microsoft sgc - - netscape sgc - type: string - type: array - required: - - issuerRef - - secretName - type: object - status: - description: Status of the Certificate. This is set and managed automatically. - properties: - conditions: - description: List of status conditions to indicate the status of certificates. - Known condition types are `Ready` and `Issuing`. - items: - description: CertificateCondition contains condition information for - an Certificate. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). 
- enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready', - `Issuing`). - type: string - required: - - status - - type - type: object - type: array - lastFailureTime: - description: LastFailureTime is the time as recorded by the Certificate - controller of the most recent failure to complete a CertificateRequest - for this Certificate resource. If set, cert-manager will not re-request - another Certificate until 1 hour has elapsed from this time. - format: date-time - type: string - nextPrivateKeySecretName: - description: The name of the Secret resource containing the private - key to be used for the next certificate iteration. The keymanager - controller will automatically set this field if the `Issuing` condition - is set to `True`. It will automatically unset this field when the - Issuing condition is not set or False. - type: string - notAfter: - description: The expiration time of the certificate stored in the secret - named by this resource in `spec.secretName`. - format: date-time - type: string - notBefore: - description: The time after which the certificate stored in the secret - named by this resource in spec.secretName is valid. - format: date-time - type: string - renewalTime: - description: RenewalTime is the time at which the certificate will be - next renewed. If not set, no upcoming renewal is scheduled. - format: date-time - type: string - revision: - description: "The current 'revision' of the certificate as issued. \n - When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` - set to one greater than the current value of this field. \n Upon issuance, - this field will be set to the value of the annotation on the CertificateRequest - resource used to issue the certificate. 
\n Persisting the value on - the CertificateRequest resource allows the certificates controller - to know whether a request is part of an old issuance or if it is part - of the ongoing revision's issuance by checking if the revision value - in the annotation is greater than this field." - type: integer - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: challenges.acme.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.state - name: State - type: string - - JSONPath: .spec.dnsName - name: Domain - type: string - - JSONPath: .status.reason - name: Reason - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: acme.cert-manager.io - names: - kind: Challenge - listKind: ChallengeList - plural: challenges - singular: challenge - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Challenge is a type to represent a Challenge request with an ACME - server - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - authorizationURL: - description: The URL to the ACME Authorization resource that this challenge - is a part of. - type: string - dnsName: - description: dnsName is the identifier that this challenge is for, e.g. - example.com. If the requested DNSName is a 'wildcard', this field - MUST be set to the non-wildcard domain, e.g. for `*.example.com`, - it must be `example.com`. - type: string - issuerRef: - description: References a properly configured ACME-type Issuer which - should be used to create this Challenge. If the Issuer does not exist, - processing will be retried. If the Issuer is not an 'ACME' Issuer, - an error will be returned and the Challenge will be marked as failed. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. - type: string - required: - - name - type: object - key: - description: 'The ACME challenge key for this challenge For HTTP01 challenges, - this is the value that must be responded with to complete the HTTP01 - challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded - SHA256 sum of the `.` text that must be set as the TXT record content.' 
- type: string - solver: - description: Contains the domain solving configuration that should be - used to solve this challenge resource. - properties: - dns01: - description: Configures cert-manager to attempt to complete authorizations - by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to manage - DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage DNS01 - challenge records. - properties: - clientID: - description: if both this and ClientSecret are left unset - MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset MSI - will be used - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret then - this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 challenge - records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that tells - cert-manager in which Cloud DNS zone the challenge record - has to be created. If left empty cert-manager will automatically - choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with Cloudflare. - Note: using an API token to authenticate is now the recommended - method as it allows greater control of permissions.' - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when using - API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 challenge - records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within a Secret - resource. In some instances, `key` is a required field. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain Name - System") (https://datatracker.ietf.org/doc/rfc2136/) to manage - DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. If - the host is an IPv6 address it must be enclosed in square - brackets (e.g [2001:db8::1]) ; port is optional. This - field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the DNS supporting - RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` - are defined. Supported values are (case-insensitive): - ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or - ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. If - ``tsigSecretSecretRef`` is defined, this field is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the TSIG - value. If ``tsigKeyName`` is defined, this field is required. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared credentials - file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only this - zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName - api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 provider - will assume using either the explicit credentials AccessKeyID/SecretAccessKey - or the inferred credentials from environment variables, - shared credentials file or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared credentials - file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 challenge - solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should be passed - to the webhook apiserver when challenges are processed. - This can contain arbitrary JSON data. Secret values should - not be specified in this stanza. If secret values are - needed (e.g. credentials for a DNS service), you should - use a SecretKeySelector to reference a Secret resource. - For details on the schema of this field, consult the webhook - provider implementation's documentation. - groupName: - description: The API group name that should be used when - POSTing ChallengePayload resources to the webhook apiserver. - This should be the same as the GroupName specified in - the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined in - the webhook provider implementation. This will typically - be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete authorizations - by performing the HTTP01 challenge flow. It is not possible to - obtain certificates for wildcard domain names (e.g. `*.example.com`) - using the HTTP01 challenge mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver will - solve challenges by creating or modifying Ingress resources - in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating Ingress - resources to solve ACME challenges that use this challenge - solver. Only one of 'class' or 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress used - to solve HTTP01 challenges. Only the 'labels' and - 'annotations' fields may be set. If labels or annotations - overlap with in-built values, the values here will - override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added to - the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to the - created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that should - have ACME challenge solving routes inserted into it in - order to solve HTTP01 challenges. This is typically used - in conjunction with ingress controllers like ingress-gce, - which maintains a 1:1 mapping between external IPs and - ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure the - ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod used to - solve HTTP01 challenges. Only the 'labels' and 'annotations' - fields may be set. If labels or annotations overlap - with in-built values, the values here will override - the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added to - the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to the - created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the HTTP01 - challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' and - 'tolerations' fields are supported currently. All - other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - affinity expressions specified by this - field, but it may choose a node that violates - one or more of the expressions. The node - that is most preferred is the one with - the greatest sum of weights, i.e. 
for - each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a - sum by iterating through the elements - of this field and adding "weight" to the - sum if the node matches the corresponding - matchExpressions; the node(s) with the - highest sum are the most preferred. - items: - description: An empty preferred scheduling - term matches all objects with implicit - weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no - objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, - associated with the corresponding - weight. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. 
- properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with - matching the corresponding nodeSelectorTerm, - in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to an update), the system may or may - not try to eventually evict the pod from - its node. - properties: - nodeSelectorTerms: - description: Required. A list of node - selector terms. The terms are ORed. - items: - description: A null or empty node - selector term matches no objects. - The requirements of them are ANDed. - The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. 
- properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: Represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists, DoesNotExist. - Gt, and Lt. - type: string - values: - description: An array of - string values. If the - operator is In or NotIn, - the values array must - be non-empty. If the operator - is Exists or DoesNotExist, - the values array must - be empty. If the operator - is Gt or Lt, the values - array must have a single - element, which will be - interpreted as an integer. - This array is replaced - during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. co-locate this pod in the same - node, zone, etc. 
as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - affinity expressions specified by this - field, but it may choose a node that violates - one or more of the expressions. The node - that is most preferred is the one with - the greatest sum of weights, i.e. for - each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a - sum by iterating through the elements - of this field and adding "weight" to the - sum if the node has pods which matches - the corresponding podAffinityTerm; the - node(s) with the highest sum are the most - preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key and - values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to a - set of values. Valid - operators are In, - NotIn, Exists and - DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, the - values array must - be non-empty. If the - operator is Exists - or DoesNotExist, the - values array must - be empty. This array - is replaced during - a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in - the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be - co-located (affinity) or not - co-located (anti-affinity) with - the pods matching the labelSelector - in the specified namespaces, - where co-located is defined - as running on a node whose value - of the label with key topologyKey - matches that of any node on - which any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to a pod label update), the system - may or may not try to eventually evict - the pod from its node. When there are - multiple elements, the lists of nodes - corresponding to each podAffinityTerm - are intersected, i.e. all terms must be - satisfied. 
- items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) with, - where co-located is defined as running - on a node whose value of the label with - key matches that of any - node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an - array of string values. - If the operator is In - or NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the values - array must be empty. This - array is replaced during - a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a - map of {key,value} pairs. A - single {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator - is "In", and the values array - contains only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); null - or empty list means "this pod's - namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running - on a node whose value of the label - with key topologyKey matches that - of any node on which any of the - selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling - rules (e.g. avoid putting this pod in the - same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to - schedule pods to nodes that satisfy the - anti-affinity expressions specified by - this field, but it may choose a node that - violates one or more of the expressions. - The node that is most preferred is the - one with the greatest sum of weights, - i.e. for each node that meets all of the - scheduling requirements (resource request, - requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating - through the elements of this field and - adding "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with the - highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. 
- properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key and - values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to a - set of values. Valid - operators are In, - NotIn, Exists and - DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, the - values array must - be non-empty. If the - operator is Exists - or DoesNotExist, the - values array must - be empty. This array - is replaced during - a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is - a map of {key,value} pairs. - A single {key,value} in - the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", - the operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be - co-located (affinity) or not - co-located (anti-affinity) with - the pods matching the labelSelector - in the specified namespaces, - where co-located is defined - as running on a node whose value - of the label with key topologyKey - matches that of any node on - which any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with - matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at - scheduling time, the pod will not be scheduled - onto the node. If the anti-affinity requirements - specified by this field cease to be met - at some point during pod execution (e.g. - due to a pod label update), the system - may or may not try to eventually evict - the pod from its node. When there are - multiple elements, the lists of nodes - corresponding to each podAffinityTerm - are intersected, i.e. all terms must be - satisfied. - items: - description: Defines a set of pods (namely - those matching the labelSelector relative - to the given namespace(s)) that this - pod should be co-located (affinity) - or not co-located (anti-affinity) with, - where co-located is defined as running - on a node whose value of the label with - key matches that of any - node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, a key, - and an operator that relates - the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: operator represents - a key's relationship to - a set of values. Valid - operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an - array of string values. - If the operator is In - or NotIn, the values array - must be non-empty. If - the operator is Exists - or DoesNotExist, the values - array must be empty. This - array is replaced during - a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a - map of {key,value} pairs. A - single {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator - is "In", and the values array - contains only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); null - or empty list means "this pod's - namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running - on a node whose value of the label - with key topologyKey matches that - of any node on which any of the - selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which must - be true for the pod to fit on a node. Selector - which must match a node''s labels for the pod - to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached - to tolerates any taint that matches the triple - using the matching operator - . - properties: - effect: - description: Effect indicates the taint effect - to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, - PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the - toleration applies to. Empty means match - all taint keys. If the key is empty, operator - must be Exists; this combination means to - match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists - and Equal. Defaults to Equal. Exists is - equivalent to wildcard for value, so that - a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration (which - must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By - default, it is not set, which means tolerate - the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value the - toleration matches to. If the operator is - Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes solver - service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. If - not specified, the solver will be treated as the 'default' solver - with the lowest priority, i.e. if any other solver has a more - specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be used - to solve. If specified and a match is found, a dnsNames selector - will take precedence over a dnsZones selector. If multiple - solvers match with the same dnsNames value, the solver with - the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in - the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be used - to solve. The most specific DNS zone match specified here - will take precedence over other DNS zone matches, so a solver - specifying sys.example.com will be selected over one specifying - example.com for the domain www.sys.example.com. If multiple - solvers match with the same dnsZones value, the solver with - the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in - the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the set - of certificate's that this challenge solver will apply to. 
- type: object - type: object - type: object - token: - description: The ACME challenge token for this challenge. This is the - raw value returned from the ACME server. - type: string - type: - description: The type of ACME challenge this resource represents. One - of "HTTP-01" or "DNS-01". - enum: - - HTTP-01 - - DNS-01 - type: string - url: - description: The URL of the ACME Challenge resource for this challenge. - This can be used to lookup details about the status of this challenge. - type: string - wildcard: - description: wildcard will be true if this challenge is for a wildcard - identifier, for example '*.example.com'. - type: boolean - required: - - authorizationURL - - dnsName - - issuerRef - - key - - solver - - token - - type - - url - type: object - status: - properties: - presented: - description: presented will be set to true if the challenge values for - this challenge are currently 'presented'. This *does not* imply the - self check is passing. Only that the values have been 'submitted' - for the appropriate challenge mechanism (i.e. the DNS01 TXT record - has been presented, or the HTTP01 configuration has been configured). - type: boolean - processing: - description: Used to denote whether this challenge should be processed - or not. This field will only be set to true by the 'scheduling' component. - It will only be set to false by the 'challenges' controller, after - the challenge has reached a final state or timed out. If this field - is set to false, the challenge controller will not take any more action. - type: boolean - reason: - description: Contains human readable information on why the Challenge - is in the current state. - type: string - state: - description: Contains the current 'state' of the challenge. If not set, - the state of the challenge is unknown. 
- enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - type: object - required: - - metadata - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: clusterissuers.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: ClusterIssuer - listKind: ClusterIssuerList - plural: clusterissuers - singular: clusterissuer - scope: Cluster - subresources: - status: {} - validation: - openAPIV3Schema: - description: A ClusterIssuer represents a certificate issuing authority which - can be referenced as part of `issuerRef` fields. It is similar to an Issuer, - however it is cluster-scoped and therefore can be referenced by resources - that exist in *any* namespace, not just the same namespace as the referent. 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the ClusterIssuer resource. - properties: - acme: - description: ACME configures this issuer to communicate with a RFC8555 - (ACME) server to obtain signed x509 certificates. - properties: - disableAccountKeyGeneration: - description: Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account - but will expect the account key to be supplied via an existing - secret. If false, the cert-manager system will generate a new - ACME account key for the Issuer. Defaults to false. - type: boolean - email: - description: Email is the email address to be associated with the - ACME account. This field is optional, but it is strongly recommended - to be set. It will be used to contact you in case of issues with - your account or certificates, including expiry notification emails. - This field may be updated after the account is initially registered. - type: string - enableDurationFeature: - description: Enables requesting a Not After date on certificates - that matches the duration of the certificate. This is not supported - by all ACME servers like Let's Encrypt. If set to true when the - ACME server does not support it it will create an error on the - Order. 
Defaults to false. - type: boolean - externalAccountBinding: - description: ExternalAccountBinding is a reference to a CA external - account of the ACME server. If set, upon registration cert-manager - will attempt to associate the given external account credentials - with the registered ACME account. - properties: - keyAlgorithm: - description: keyAlgorithm is the MAC key algorithm that the - key is used for. Valid values are "HS256", "HS384" and "HS512". - enum: - - HS256 - - HS384 - - HS512 - type: string - keyID: - description: keyID is the ID of the CA key that the External - Account is bound to. - type: string - keySecretRef: - description: keySecretRef is a Secret Key Selector referencing - a data item in a Kubernetes Secret which holds the symmetric - MAC key of the External Account Binding. The `key` is the - index string that is paired with the key data in the Secret - and should not be confused with the key data itself, or indeed - with the External Account Binding keyID above. The secret - key stored in the Secret **must** be un-padded, base64 URL - encoded data. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - keyAlgorithm - - keyID - - keySecretRef - type: object - preferredChain: - description: 'PreferredChain is the chain to use if the ACME server - outputs multiple. PreferredChain is no guarantee that this one - gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s - DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" - for the newer Let''s Encrypt root CA. 
This value picks the first - certificate bundle in the ACME alternative chains that has a certificate - with this value as its issuer''s CN' - maxLength: 64 - type: string - privateKeySecretRef: - description: PrivateKey is the name of a Kubernetes Secret resource - that will be used to store the automatically generated ACME account - private key. Optionally, a `key` may be specified to select a - specific entry within the named Secret resource. If `key` is not - specified, a default of `tls.key` will be used. - properties: - key: - description: The key of the entry in the Secret resource's `data` - field to be used. Some instances of this field may be defaulted, - in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - server: - description: 'Server is the URL used to access the ACME server''s - ''directory'' endpoint. For example, for Let''s Encrypt''s staging - endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported.' - type: string - skipTLSVerify: - description: Enables or disables validation of the ACME server TLS - certificate. If true, requests to the ACME server will not have - their TLS certificate validated (i.e. insecure connections will - be allowed). Only enable this option in development environments. - The cert-manager system installed roots will be used to verify - connections to the ACME server if this is false. Defaults to false. - type: boolean - solvers: - description: 'Solvers is a list of challenge solvers that will be - used to solve ACME challenges for the matching domains. Solver - configurations must be provided in order to obtain certificates - from an ACME server. 
For more information, see: https://cert-manager.io/docs/configuration/acme/' - items: - description: Configures an issuer to solve challenges using the - specified options. Only one of HTTP01 or DNS01 may be provided. - properties: - dns01: - description: Configures cert-manager to attempt to complete - authorizations by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to - manage DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. 
- properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage - DNS01 challenge records. - properties: - clientID: - description: if both this and ClientSecret are left - unset MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset - MSI will be used - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret - then this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 - challenge records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that - tells cert-manager in which Cloud DNS zone the challenge - record has to be created. If left empty cert-manager - will automatically choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with - Cloudflare. Note: using an API token to authenticate - is now the recommended method as it allows greater - control of permissions.' - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. 
Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when - using API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 - challenge records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain - Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. - If the host is an IPv6 address it must be enclosed - in square brackets (e.g [2001:db8::1]) ; port is - optional. This field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the - DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` - and ``tsigKeyName`` are defined. Supported values - are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, - ``HMACSHA256`` or ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field - is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the - TSIG value. If ``tsigKeyName`` is defined, this - field is required. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only - this zone in Route53 and will not do an lookup using - the route53:ListHostedZonesByName api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 - provider will assume using either the explicit credentials - AccessKeyID/SecretAccessKey or the inferred credentials - from environment variables, shared credentials file - or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 - challenge solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should - be passed to the webhook apiserver when challenges - are processed. This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for - a DNS service), you should use a SecretKeySelector - to reference a Secret resource. For details on the - schema of this field, consult the webhook provider - implementation's documentation. - groupName: - description: The API group name that should be used - when POSTing ChallengePayload resources to the webhook - apiserver. This should be the same as the GroupName - specified in the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined - in the webhook provider implementation. This will - typically be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete - authorizations by performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain - names (e.g. `*.example.com`) using the HTTP01 challenge - mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver - will solve challenges by creating or modifying Ingress - resources in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating - Ingress resources to solve ACME challenges that - use this challenge solver. Only one of 'class' or - 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 - challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that - should have ACME challenge solving routes inserted - into it in order to solve HTTP01 challenges. This - is typically used in conjunction with ingress controllers - like ingress-gce, which maintains a 1:1 mapping - between external IPs and ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure - the ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the - HTTP01 challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' - and 'tolerations' fields are supported currently. - All other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. 
- for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node matches the - corresponding matchExpressions; - the node(s) with the highest sum - are the most preferred. - items: - description: An empty preferred - scheduling term matches all objects - with implicit weight 0 (i.e. it's - a no-op). A null preferred scheduling - term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector - term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated - with matching the corresponding - nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to an update), the system - may or may not try to eventually - evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list - of node selector terms. The - terms are ORed. - items: - description: A null or empty - node selector term matches - no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of - the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. 
co-locate this pod in the - same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node has pods - which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. 
- If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to a pod label update), - the system may or may not try to - eventually evict the pod from its - node. 
When there are multiple elements, - the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity - scheduling rules (e.g. avoid putting - this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the anti-affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - anti-affinity expressions, etc.), - compute a sum by iterating through - the elements of this field and adding - "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity - requirements specified by this field - are not met at scheduling time, - the pod will not be scheduled onto - the node. If the anti-affinity requirements - specified by this field cease to - be met at some point during pod - execution (e.g. due to a pod label - update), the system may or may not - try to eventually evict the pod - from its node. When there are multiple - elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which - must be true for the pod to fit on a node. - Selector which must match a node''s labels - for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service - account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is - attached to tolerates any taint that matches - the triple using the - matching operator . - properties: - effect: - description: Effect indicates the taint - effect to match. Empty means match - all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Key is the taint key that - the toleration applies to. Empty means - match all taint keys. If the key is - empty, operator must be Exists; this - combination means to match all values - and all keys. - type: string - operator: - description: Operator represents a key's - relationship to the value. Valid operators - are Exists and Equal. Defaults to - Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration - (which must be of effect NoExecute, - otherwise this field is ignored) tolerates - the taint. By default, it is not set, - which means tolerate the taint forever - (do not evict). Zero and negative - values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value - the toleration matches to. If the - operator is Exists, the value should - be empty, otherwise just a regular - string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes - solver service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' - solver with the lowest priority, i.e. if any other solver - has a more specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be - used to solve. If specified and a match is found, a - dnsNames selector will take precedence over a dnsZones - selector. If multiple solvers match with the same dnsNames - value, the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be - used to solve. The most specific DNS zone match specified - here will take precedence over other DNS zone matches, - so a solver specifying sys.example.com will be selected - over one specifying example.com for the domain www.sys.example.com. - If multiple solvers match with the same dnsZones value, - the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the - set of certificate's that this challenge solver will - apply to. 
- type: object - type: object - type: object - type: array - required: - - privateKeySecretRef - - server - type: object - ca: - description: CA configures this issuer to sign certificates using a - signing CA keypair stored in a Secret resource. This is used to build - internal PKIs that are managed by cert-manager. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set, - certificates will be issued without distribution points set. - items: - type: string - type: array - secretName: - description: SecretName is the name of the secret used to sign Certificates - issued by this Issuer. - type: string - required: - - secretName - type: object - selfSigned: - description: SelfSigned configures this issuer to 'self sign' certificates - using the private key used to create the CertificateRequest object. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set - certificate will be issued without CDP. Values are strings. - items: - type: string - type: array - type: object - vault: - description: Vault configures this issuer to sign certificates using - a HashiCorp Vault PKI backend. - properties: - auth: - description: Auth configures how cert-manager authenticates with - the Vault server. - properties: - appRole: - description: AppRole authenticates with Vault using the App - Role auth mechanism, with the role and secret stored in a - Kubernetes Secret resource. 
- properties: - path: - description: 'Path where the App Role authentication backend - is mounted in Vault, e.g: "approle"' - type: string - roleId: - description: RoleID configured in the App Role authentication - backend when setting up the authentication backend in - Vault. - type: string - secretRef: - description: Reference to a key in a Secret that contains - the App Role secret used to authenticate with Vault. The - `key` field must be specified and denotes which entry - within the Secret resource is used as the app role secret. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - path - - roleId - - secretRef - type: object - kubernetes: - description: Kubernetes authenticates with Vault by passing - the ServiceAccount token stored in the named Secret resource - to the Vault server. - properties: - mountPath: - description: The Vault mountPath here is the mount path - to use when authenticating with Vault. For example, setting - a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` - to authenticate with Vault. If unspecified, the default - value "/v1/auth/kubernetes" will be used. - type: string - role: - description: A required field containing the Vault Role - to assume. A Role binds a Kubernetes ServiceAccount with - a set of Vault policies. - type: string - secretRef: - description: The required Secret field containing a Kubernetes - ServiceAccount JWT used for authenticating with Vault. - Use of 'ambient credentials' is not supported. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - role - - secretRef - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by presenting - a token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - type: object - caBundle: - description: PEM encoded CA bundle used to validate Vault server - certificate. Only used if the Server URL is using HTTPS protocol. - This parameter is ignored for plain HTTP protocol connection. - If not set the system root certificates are used to validate the - TLS connection. - format: byte - type: string - namespace: - description: 'Name of the vault namespace. Namespaces is a set of - features within Vault Enterprise that allows Vault environments - to support Secure Multi-tenancy. e.g: "ns1" More about namespaces - can be found here https://www.vaultproject.io/docs/enterprise/namespaces' - type: string - path: - description: 'Path is the mount path of the Vault PKI backend''s - `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' - type: string - server: - description: 'Server is the connection address for the Vault server, - e.g: "https://vault.example.com:8200".' - type: string - required: - - auth - - path - - server - type: object - venafi: - description: Venafi configures this issuer to sign certificates using - a Venafi TPP or Venafi Cloud policy zone. 
- properties: - cloud: - description: Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. - properties: - apiTokenSecretRef: - description: APITokenSecretRef is a secret key selector for - the Venafi Cloud API token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: URL is the base URL for Venafi Cloud. Defaults - to "https://api.venafi.cloud/v1". - type: string - required: - - apiTokenSecretRef - type: object - tpp: - description: TPP specifies Trust Protection Platform configuration - settings. Only one of TPP or Cloud may be specified. - properties: - caBundle: - description: CABundle is a PEM encoded TLS certificate to use - to verify connections to the TPP instance. If specified, system - roots will not be used and the issuing CA for the TPP instance - must be verifiable using the provided root. If not specified, - the connection will be verified using the cert-manager system - root certificates. - format: byte - type: string - credentialsRef: - description: CredentialsRef is a reference to a Secret containing - the username and password for the TPP server. The secret must - contain two keys, 'username' and 'password'. - properties: - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: 'URL is the base URL for the vedsdk endpoint of - the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
- type: string - required: - - credentialsRef - - url - type: object - zone: - description: Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by - the named zone policy. This field is required. - type: string - required: - - zone - type: object - type: object - status: - description: Status of the ClusterIssuer. This is set and managed automatically. - properties: - acme: - description: ACME specific status options. This field should only be - set if the Issuer is configured to use an ACME server to issue certificates. - properties: - lastRegisteredEmail: - description: LastRegisteredEmail is the email associated with the - latest registered ACME account, in order to track changes made - to registered account associated with the Issuer - type: string - uri: - description: URI is the unique account identifier, which can also - be used to retrieve account details from the CA - type: string - type: object - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. - items: - description: IssuerCondition contains condition information for an - Issuer. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready'). 
- type: string - required: - - status - - type - type: object - type: array - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: issuers.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - JSONPath: .status.conditions[?(@.type=="Ready")].message - name: Status - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: cert-manager.io - names: - kind: Issuer - listKind: IssuerList - plural: issuers - singular: issuer - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: An Issuer represents a certificate issuing authority which can - be referenced as part of `issuerRef` fields. It is scoped to a single namespace - and can therefore only be referenced by resources within the same namespace. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Desired state of the Issuer resource. - properties: - acme: - description: ACME configures this issuer to communicate with a RFC8555 - (ACME) server to obtain signed x509 certificates. - properties: - disableAccountKeyGeneration: - description: Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account - but will expect the account key to be supplied via an existing - secret. If false, the cert-manager system will generate a new - ACME account key for the Issuer. Defaults to false. - type: boolean - email: - description: Email is the email address to be associated with the - ACME account. This field is optional, but it is strongly recommended - to be set. It will be used to contact you in case of issues with - your account or certificates, including expiry notification emails. - This field may be updated after the account is initially registered. - type: string - enableDurationFeature: - description: Enables requesting a Not After date on certificates - that matches the duration of the certificate. This is not supported - by all ACME servers like Let's Encrypt. If set to true when the - ACME server does not support it it will create an error on the - Order. Defaults to false. 
- type: boolean - externalAccountBinding: - description: ExternalAccountBinding is a reference to a CA external - account of the ACME server. If set, upon registration cert-manager - will attempt to associate the given external account credentials - with the registered ACME account. - properties: - keyAlgorithm: - description: keyAlgorithm is the MAC key algorithm that the - key is used for. Valid values are "HS256", "HS384" and "HS512". - enum: - - HS256 - - HS384 - - HS512 - type: string - keyID: - description: keyID is the ID of the CA key that the External - Account is bound to. - type: string - keySecretRef: - description: keySecretRef is a Secret Key Selector referencing - a data item in a Kubernetes Secret which holds the symmetric - MAC key of the External Account Binding. The `key` is the - index string that is paired with the key data in the Secret - and should not be confused with the key data itself, or indeed - with the External Account Binding keyID above. The secret - key stored in the Secret **must** be un-padded, base64 URL - encoded data. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - keyAlgorithm - - keyID - - keySecretRef - type: object - preferredChain: - description: 'PreferredChain is the chain to use if the ACME server - outputs multiple. PreferredChain is no guarantee that this one - gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s - DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" - for the newer Let''s Encrypt root CA. 
This value picks the first - certificate bundle in the ACME alternative chains that has a certificate - with this value as its issuer''s CN' - maxLength: 64 - type: string - privateKeySecretRef: - description: PrivateKey is the name of a Kubernetes Secret resource - that will be used to store the automatically generated ACME account - private key. Optionally, a `key` may be specified to select a - specific entry within the named Secret resource. If `key` is not - specified, a default of `tls.key` will be used. - properties: - key: - description: The key of the entry in the Secret resource's `data` - field to be used. Some instances of this field may be defaulted, - in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - server: - description: 'Server is the URL used to access the ACME server''s - ''directory'' endpoint. For example, for Let''s Encrypt''s staging - endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported.' - type: string - skipTLSVerify: - description: Enables or disables validation of the ACME server TLS - certificate. If true, requests to the ACME server will not have - their TLS certificate validated (i.e. insecure connections will - be allowed). Only enable this option in development environments. - The cert-manager system installed roots will be used to verify - connections to the ACME server if this is false. Defaults to false. - type: boolean - solvers: - description: 'Solvers is a list of challenge solvers that will be - used to solve ACME challenges for the matching domains. Solver - configurations must be provided in order to obtain certificates - from an ACME server. 
For more information, see: https://cert-manager.io/docs/configuration/acme/' - items: - description: Configures an issuer to solve challenges using the - specified options. Only one of HTTP01 or DNS01 may be provided. - properties: - dns01: - description: Configures cert-manager to attempt to complete - authorizations by performing the DNS01 challenge flow. - properties: - acmeDNS: - description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) - API to manage DNS01 challenge records. - properties: - accountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - host: - type: string - required: - - accountSecretRef - - host - type: object - akamai: - description: Use the Akamai DNS zone management API to - manage DNS01 challenge records. - properties: - accessTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientSecretSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. 
- properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - clientTokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - serviceConsumerDomain: - type: string - required: - - accessTokenSecretRef - - clientSecretSecretRef - - clientTokenSecretRef - - serviceConsumerDomain - type: object - azureDNS: - description: Use the Microsoft Azure DNS API to manage - DNS01 challenge records. - properties: - clientID: - description: if both this and ClientSecret are left - unset MSI will be used - type: string - clientSecretSecretRef: - description: if both this and ClientID are left unset - MSI will be used - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - environment: - enum: - - AzurePublicCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureUSGovernmentCloud - type: string - hostedZoneName: - type: string - resourceGroupName: - type: string - subscriptionID: - type: string - tenantID: - description: when specifying ClientID and ClientSecret - then this field is also needed - type: string - required: - - resourceGroupName - - subscriptionID - type: object - cloudDNS: - description: Use the Google Cloud DNS API to manage DNS01 - challenge records. - properties: - hostedZoneName: - description: HostedZoneName is an optional field that - tells cert-manager in which Cloud DNS zone the challenge - record has to be created. If left empty cert-manager - will automatically choose a zone. - type: string - project: - type: string - serviceAccountSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - project - type: object - cloudflare: - description: Use the Cloudflare API to manage DNS01 challenge - records. - properties: - apiKeySecretRef: - description: 'API key to use to authenticate with - Cloudflare. Note: using an API token to authenticate - is now the recommended method as it allows greater - control of permissions.' - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. 
Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - apiTokenSecretRef: - description: API token used to authenticate with Cloudflare. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - email: - description: Email of the account, only required when - using API key based authentication. - type: string - type: object - cnameStrategy: - description: CNAMEStrategy configures how the DNS01 provider - should handle CNAME records when found in DNS zones. - enum: - - None - - Follow - type: string - digitalocean: - description: Use the DigitalOcean DNS API to manage DNS01 - challenge records. - properties: - tokenSecretRef: - description: A reference to a specific 'key' within - a Secret resource. In some instances, `key` is a - required field. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - tokenSecretRef - type: object - rfc2136: - description: Use RFC2136 ("Dynamic Updates in the Domain - Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. 
- properties: - nameserver: - description: The IP address or hostname of an authoritative - DNS server supporting RFC2136 in the form host:port. - If the host is an IPv6 address it must be enclosed - in square brackets (e.g [2001:db8::1]) ; port is - optional. This field is required. - type: string - tsigAlgorithm: - description: 'The TSIG Algorithm configured in the - DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` - and ``tsigKeyName`` are defined. Supported values - are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, - ``HMACSHA256`` or ``HMACSHA512``.' - type: string - tsigKeyName: - description: The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field - is required. - type: string - tsigSecretSecretRef: - description: The name of the secret containing the - TSIG value. If ``tsigKeyName`` is defined, this - field is required. - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - nameserver - type: object - route53: - description: Use the AWS Route53 API to manage DNS01 challenge - records. - properties: - accessKeyID: - description: 'The AccessKeyID is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' - type: string - hostedZoneID: - description: If set, the provider will manage only - this zone in Route53 and will not do an lookup using - the route53:ListHostedZonesByName api call. 
- type: string - region: - description: Always set the region when using AccessKeyID - and SecretAccessKey - type: string - role: - description: Role is a Role ARN which the Route53 - provider will assume using either the explicit credentials - AccessKeyID/SecretAccessKey or the inferred credentials - from environment variables, shared credentials file - or AWS Instance metadata - type: string - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. - If not set we fall-back to using env vars, shared - credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - key: - description: The key of the entry in the Secret - resource's `data` field to be used. Some instances - of this field may be defaulted, in others it - may be required. - type: string - name: - description: 'Name of the resource being referred - to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - region - type: object - webhook: - description: Configure an external webhook based DNS01 - challenge solver to manage DNS01 challenge records. - properties: - config: - description: Additional configuration that should - be passed to the webhook apiserver when challenges - are processed. This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for - a DNS service), you should use a SecretKeySelector - to reference a Secret resource. For details on the - schema of this field, consult the webhook provider - implementation's documentation. - groupName: - description: The API group name that should be used - when POSTing ChallengePayload resources to the webhook - apiserver. This should be the same as the GroupName - specified in the webhook provider implementation. 
- type: string - solverName: - description: The name of the solver to use, as defined - in the webhook provider implementation. This will - typically be the name of the provider, e.g. 'cloudflare'. - type: string - required: - - groupName - - solverName - type: object - type: object - http01: - description: Configures cert-manager to attempt to complete - authorizations by performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain - names (e.g. `*.example.com`) using the HTTP01 challenge - mechanism. - properties: - ingress: - description: The ingress based HTTP01 challenge solver - will solve challenges by creating or modifying Ingress - resources in order to route requests for '/.well-known/acme-challenge/XYZ' - to 'challenge solver' pods that are provisioned by cert-manager - for each Challenge to be completed. - properties: - class: - description: The ingress class to use when creating - Ingress resources to solve ACME challenges that - use this challenge solver. Only one of 'class' or - 'name' may be specified. - type: string - ingressTemplate: - description: Optional ingress template used to configure - the ACME challenge solver ingress used for HTTP01 - challenges - properties: - metadata: - description: ObjectMeta overrides for the ingress - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the created ACME HTTP01 solver ingress. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver ingress. 
- type: object - type: object - type: object - name: - description: The name of the ingress resource that - should have ACME challenge solving routes inserted - into it in order to solve HTTP01 challenges. This - is typically used in conjunction with ingress controllers - like ingress-gce, which maintains a 1:1 mapping - between external IPs and ingress resources. - type: string - podTemplate: - description: Optional pod template used to configure - the ACME challenge solver pods used for HTTP01 challenges - properties: - metadata: - description: ObjectMeta overrides for the pod - used to solve HTTP01 challenges. Only the 'labels' - and 'annotations' fields may be set. If labels - or annotations overlap with in-built values, - the values here will override the in-built values. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that should be added - to the create ACME HTTP01 solver pods. - type: object - labels: - additionalProperties: - type: string - description: Labels that should be added to - the created ACME HTTP01 solver pods. - type: object - type: object - spec: - description: PodSpec defines overrides for the - HTTP01 challenge solver pod. Only the 'priorityClassName', - 'nodeSelector', 'affinity', 'serviceAccountName' - and 'tolerations' fields are supported currently. - All other fields will be ignored. - properties: - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. 
- for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node matches the - corresponding matchExpressions; - the node(s) with the highest sum - are the most preferred. - items: - description: An empty preferred - scheduling term matches all objects - with implicit weight 0 (i.e. it's - a no-op). A null preferred scheduling - term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector - term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated - with matching the corresponding - nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to an update), the system - may or may not try to eventually - evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list - of node selector terms. The - terms are ORed. - items: - description: A null or empty - node selector term matches - no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of - the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node - selector requirements - by node's labels. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. 
- properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node - selector requirements - by node's fields. - items: - description: A node selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: The label - key that the selector - applies to. - type: string - operator: - description: Represents - a key's relationship - to a set of values. - Valid operators - are In, NotIn, Exists, - DoesNotExist. Gt, - and Lt. - type: string - values: - description: An array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. If - the operator is - Gt or Lt, the values - array must have - a single element, - which will be interpreted - as an integer. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. 
co-locate this pod in the - same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - affinity expressions, etc.), compute - a sum by iterating through the elements - of this field and adding "weight" - to the sum if the node has pods - which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. 
- If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements - specified by this field are not - met at scheduling time, the pod - will not be scheduled onto the node. - If the affinity requirements specified - by this field cease to be met at - some point during pod execution - (e.g. due to a pod label update), - the system may or may not try to - eventually evict the pod from its - node. 
When there are multiple elements, - the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity - scheduling rules (e.g. avoid putting - this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer - to schedule pods to nodes that satisfy - the anti-affinity expressions specified - by this field, but it may choose - a node that violates one or more - of the expressions. The node that - is most preferred is the one with - the greatest sum of weights, i.e. - for each node that meets all of - the scheduling requirements (resource - request, requiredDuringScheduling - anti-affinity expressions, etc.), - compute a sum by iterating through - the elements of this field and adding - "weight" to the sum if the node - has pods which matches the corresponding - podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all - of the matched WeightedPodAffinityTerm - fields are added per-node to find - the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod - affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: A label query - over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label - selector requirements. - The requirements are - ANDed. - items: - description: A label - selector requirement - is a selector that - contains values, - a key, and an operator - that relates the - key and values. - properties: - key: - description: key - is the label - key that the - selector applies - to. - type: string - operator: - description: operator - represents a - key's relationship - to a set of - values. Valid - operators are - In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array - of string values. - If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or - DoesNotExist, - the values array - must be empty. - This array is - replaced during - a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels - map is equivalent - to an element of matchExpressions, - whose key field is - "key", the operator - is "In", and the values - array contains only - "value". The requirements - are ANDed. 
- type: object - type: object - namespaces: - description: namespaces - specifies which namespaces - the labelSelector applies - to (matches against); - null or empty list means - "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) - or not co-located (anti-affinity) - with the pods matching - the labelSelector in the - specified namespaces, - where co-located is defined - as running on a node whose - value of the label with - key topologyKey matches - that of any node on which - any of the selected pods - is running. Empty topologyKey - is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated - with matching the corresponding - podAffinityTerm, in the range - 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity - requirements specified by this field - are not met at scheduling time, - the pod will not be scheduled onto - the node. If the anti-affinity requirements - specified by this field cease to - be met at some point during pod - execution (e.g. due to a pod label - update), the system may or may not - try to eventually evict the pod - from its node. When there are multiple - elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. - items: - description: Defines a set of pods - (namely those matching the labelSelector - relative to the given namespace(s)) - that this pod should be co-located - (affinity) or not co-located (anti-affinity) - with, where co-located is defined - as running on a node whose value - of the label with key - matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: A label selector - requirement is a selector - that contains values, - a key, and an operator - that relates the key - and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: operator - represents a key's - relationship to - a set of values. - Valid operators - are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values - is an array of string - values. If the operator - is In or NotIn, - the values array - must be non-empty. - If the operator - is Exists or DoesNotExist, - the values array - must be empty. This - array is replaced - during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels - is a map of {key,value} - pairs. A single {key,value} - in the matchLabels map - is equivalent to an element - of matchExpressions, whose - key field is "key", the - operator is "In", and - the values array contains - only "value". The requirements - are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies - which namespaces the labelSelector - applies to (matches against); - null or empty list means "this - pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should - be co-located (affinity) or - not co-located (anti-affinity) - with the pods matching the - labelSelector in the specified - namespaces, where co-located - is defined as running on a - node whose value of the label - with key topologyKey matches - that of any node on which - any of the selected pods is - running. Empty topologyKey - is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is a selector which - must be true for the pod to fit on a node. - Selector which must match a node''s labels - for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - priorityClassName: - description: If specified, the pod's priorityClassName. - type: string - serviceAccountName: - description: If specified, the pod's service - account - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is - attached to tolerates any taint that matches - the triple using the - matching operator . - properties: - effect: - description: Effect indicates the taint - effect to match. Empty means match - all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Key is the taint key that - the toleration applies to. Empty means - match all taint keys. If the key is - empty, operator must be Exists; this - combination means to match all values - and all keys. - type: string - operator: - description: Operator represents a key's - relationship to the value. Valid operators - are Exists and Equal. Defaults to - Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents - the period of time the toleration - (which must be of effect NoExecute, - otherwise this field is ignored) tolerates - the taint. By default, it is not set, - which means tolerate the taint forever - (do not evict). Zero and negative - values will be treated as 0 (evict - immediately) by the system. 
- format: int64 - type: integer - value: - description: Value is the taint value - the toleration matches to. If the - operator is Exists, the value should - be empty, otherwise just a regular - string. - type: string - type: object - type: array - type: object - type: object - serviceType: - description: Optional service type for Kubernetes - solver service - type: string - type: object - type: object - selector: - description: Selector selects a set of DNSNames on the Certificate - resource that should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' - solver with the lowest priority, i.e. if any other solver - has a more specific match, it will be used instead. - properties: - dnsNames: - description: List of DNSNames that this solver will be - used to solve. If specified and a match is found, a - dnsNames selector will take precedence over a dnsZones - selector. If multiple solvers match with the same dnsNames - value, the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - dnsZones: - description: List of DNSZones that this solver will be - used to solve. The most specific DNS zone match specified - here will take precedence over other DNS zone matches, - so a solver specifying sys.example.com will be selected - over one specifying example.com for the domain www.sys.example.com. - If multiple solvers match with the same dnsZones value, - the solver with the most matching labels in matchLabels - will be selected. If neither has more matches, the solver - defined earlier in the list will be selected. - items: - type: string - type: array - matchLabels: - additionalProperties: - type: string - description: A label selector that is used to refine the - set of certificate's that this challenge solver will - apply to. 
- type: object - type: object - type: object - type: array - required: - - privateKeySecretRef - - server - type: object - ca: - description: CA configures this issuer to sign certificates using a - signing CA keypair stored in a Secret resource. This is used to build - internal PKIs that are managed by cert-manager. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set, - certificates will be issued without distribution points set. - items: - type: string - type: array - secretName: - description: SecretName is the name of the secret used to sign Certificates - issued by this Issuer. - type: string - required: - - secretName - type: object - selfSigned: - description: SelfSigned configures this issuer to 'self sign' certificates - using the private key used to create the CertificateRequest object. - properties: - crlDistributionPoints: - description: The CRL distribution points is an X.509 v3 certificate - extension which identifies the location of the CRL from which - the revocation of this certificate can be checked. If not set - certificate will be issued without CDP. Values are strings. - items: - type: string - type: array - type: object - vault: - description: Vault configures this issuer to sign certificates using - a HashiCorp Vault PKI backend. - properties: - auth: - description: Auth configures how cert-manager authenticates with - the Vault server. - properties: - appRole: - description: AppRole authenticates with Vault using the App - Role auth mechanism, with the role and secret stored in a - Kubernetes Secret resource. 
- properties: - path: - description: 'Path where the App Role authentication backend - is mounted in Vault, e.g: "approle"' - type: string - roleId: - description: RoleID configured in the App Role authentication - backend when setting up the authentication backend in - Vault. - type: string - secretRef: - description: Reference to a key in a Secret that contains - the App Role secret used to authenticate with Vault. The - `key` field must be specified and denotes which entry - within the Secret resource is used as the app role secret. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - path - - roleId - - secretRef - type: object - kubernetes: - description: Kubernetes authenticates with Vault by passing - the ServiceAccount token stored in the named Secret resource - to the Vault server. - properties: - mountPath: - description: The Vault mountPath here is the mount path - to use when authenticating with Vault. For example, setting - a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` - to authenticate with Vault. If unspecified, the default - value "/v1/auth/kubernetes" will be used. - type: string - role: - description: A required field containing the Vault Role - to assume. A Role binds a Kubernetes ServiceAccount with - a set of Vault policies. - type: string - secretRef: - description: The required Secret field containing a Kubernetes - ServiceAccount JWT used for authenticating with Vault. - Use of 'ambient credentials' is not supported. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. 
Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - required: - - role - - secretRef - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by presenting - a token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - type: object - caBundle: - description: PEM encoded CA bundle used to validate Vault server - certificate. Only used if the Server URL is using HTTPS protocol. - This parameter is ignored for plain HTTP protocol connection. - If not set the system root certificates are used to validate the - TLS connection. - format: byte - type: string - namespace: - description: 'Name of the vault namespace. Namespaces is a set of - features within Vault Enterprise that allows Vault environments - to support Secure Multi-tenancy. e.g: "ns1" More about namespaces - can be found here https://www.vaultproject.io/docs/enterprise/namespaces' - type: string - path: - description: 'Path is the mount path of the Vault PKI backend''s - `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' - type: string - server: - description: 'Server is the connection address for the Vault server, - e.g: "https://vault.example.com:8200".' - type: string - required: - - auth - - path - - server - type: object - venafi: - description: Venafi configures this issuer to sign certificates using - a Venafi TPP or Venafi Cloud policy zone. 
- properties: - cloud: - description: Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. - properties: - apiTokenSecretRef: - description: APITokenSecretRef is a secret key selector for - the Venafi Cloud API token. - properties: - key: - description: The key of the entry in the Secret resource's - `data` field to be used. Some instances of this field - may be defaulted, in others it may be required. - type: string - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: URL is the base URL for Venafi Cloud. Defaults - to "https://api.venafi.cloud/v1". - type: string - required: - - apiTokenSecretRef - type: object - tpp: - description: TPP specifies Trust Protection Platform configuration - settings. Only one of TPP or Cloud may be specified. - properties: - caBundle: - description: CABundle is a PEM encoded TLS certificate to use - to verify connections to the TPP instance. If specified, system - roots will not be used and the issuing CA for the TPP instance - must be verifiable using the provided root. If not specified, - the connection will be verified using the cert-manager system - root certificates. - format: byte - type: string - credentialsRef: - description: CredentialsRef is a reference to a Secret containing - the username and password for the TPP server. The secret must - contain two keys, 'username' and 'password'. - properties: - name: - description: 'Name of the resource being referred to. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - name - type: object - url: - description: 'URL is the base URL for the vedsdk endpoint of - the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
- type: string - required: - - credentialsRef - - url - type: object - zone: - description: Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by - the named zone policy. This field is required. - type: string - required: - - zone - type: object - type: object - status: - description: Status of the Issuer. This is set and managed automatically. - properties: - acme: - description: ACME specific status options. This field should only be - set if the Issuer is configured to use an ACME server to issue certificates. - properties: - lastRegisteredEmail: - description: LastRegisteredEmail is the email associated with the - latest registered ACME account, in order to track changes made - to registered account associated with the Issuer - type: string - uri: - description: URI is the unique account identifier, which can also - be used to retrieve account details from the CA - type: string - type: object - conditions: - description: List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. - items: - description: IssuerCondition contains condition information for an - Issuer. - properties: - lastTransitionTime: - description: LastTransitionTime is the timestamp corresponding - to the last status change of this condition. - format: date-time - type: string - message: - description: Message is a human readable description of the details - of the last transition, complementing reason. - type: string - reason: - description: Reason is a brief machine readable explanation for - the condition's last transition. - type: string - status: - description: Status of the condition, one of ('True', 'False', - 'Unknown'). - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: Type of the condition, known values are ('Ready'). 
- type: string - required: - - status - - type - type: object - type: array - type: object - required: - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from-secret: '{{ template "webhook.caRef" . }}' - labels: - app: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/managed-by: '{{ .Release.Service }}' - app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - helm.sh/chart: '{{ template "cert-manager.chart" . }}' - name: orders.acme.cert-manager.io -spec: - additionalPrinterColumns: - - JSONPath: .status.state - name: State - type: string - - JSONPath: .spec.issuerRef.name - name: Issuer - priority: 1 - type: string - - JSONPath: .status.reason - name: Reason - priority: 1 - type: string - - JSONPath: .metadata.creationTimestamp - description: CreationTimestamp is a timestamp representing the server time when - this object was created. It is not guaranteed to be set in happens-before order - across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. - name: Age - type: date - group: acme.cert-manager.io - names: - kind: Order - listKind: OrderList - plural: orders - singular: order - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Order is a type to represent an Order with an ACME server - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - commonName: - description: CommonName is the common name as specified on the DER encoded - CSR. If specified, this value must also be present in `dnsNames` or - `ipAddresses`. This field must match the corresponding field on the - DER encoded CSR. - type: string - dnsNames: - description: DNSNames is a list of DNS names that should be included - as part of the Order validation process. This field must match the - corresponding field on the DER encoded CSR. - items: - type: string - type: array - duration: - description: Duration is the duration for the not after date for the - requested certificate. this is set on order creation as pe the ACME - spec. - type: string - ipAddresses: - description: IPAddresses is a list of IP addresses that should be included - as part of the Order validation process. This field must match the - corresponding field on the DER encoded CSR. - items: - type: string - type: array - issuerRef: - description: IssuerRef references a properly configured ACME-type Issuer - which should be used to create this Order. If the Issuer does not - exist, processing will be retried. If the Issuer is not an 'ACME' - Issuer, an error will be returned and the Order will be marked as - failed. - properties: - group: - description: Group of the resource being referred to. - type: string - kind: - description: Kind of the resource being referred to. - type: string - name: - description: Name of the resource being referred to. 
- type: string - required: - - name - type: object - request: - description: Certificate signing request bytes in DER encoding. This - will be used when finalizing the order. This field must be set on - the order. - format: byte - type: string - required: - - issuerRef - - request - type: object - status: - properties: - authorizations: - description: Authorizations contains data returned from the ACME server - on what authorizations must be completed in order to validate the - DNS names specified on the Order. - items: - description: ACMEAuthorization contains data returned from the ACME - server on an authorization that must be completed in order validate - a DNS name on an ACME Order resource. - properties: - challenges: - description: Challenges specifies the challenge types offered - by the ACME server. One of these challenge types will be selected - when validating the DNS name and an appropriate Challenge resource - will be created to perform the ACME challenge process. - items: - description: Challenge specifies a challenge offered by the - ACME server for an Order. An appropriate Challenge resource - can be created to perform the ACME challenge process. - properties: - token: - description: Token is the token that must be presented for - this challenge. This is used to compute the 'key' that - must also be presented. - type: string - type: - description: Type is the type of challenge being offered, - e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the - raw value retrieved from the ACME server. Only 'http-01' - and 'dns-01' are supported by cert-manager, other values - will be ignored. - type: string - url: - description: URL is the URL of this challenge. It can be - used to retrieve additional metadata about the Challenge - from the ACME server. 
- type: string - required: - - token - - type - - url - type: object - type: array - identifier: - description: Identifier is the DNS name to be validated as part - of this authorization - type: string - initialState: - description: InitialState is the initial state of the ACME authorization - when first fetched from the ACME server. If an Authorization - is already 'valid', the Order controller will not create a Challenge - resource for the authorization. This will occur when working - with an ACME server that enables 'authz reuse' (such as Let's - Encrypt's production endpoint). If not set and 'identifier' - is set, the state is assumed to be pending and a Challenge will - be created. - enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - url: - description: URL is the URL of the Authorization that must be - completed - type: string - wildcard: - description: Wildcard will be true if this authorization is for - a wildcard DNS name. If this is true, the identifier will be - the *non-wildcard* version of the DNS name. For example, if - '*.example.com' is the DNS name being validated, this field - will be 'true' and the 'identifier' field will be 'example.com'. - type: boolean - required: - - url - type: object - type: array - certificate: - description: Certificate is a copy of the PEM encoded certificate for - this Order. This field will be populated after the order has been - successfully finalized with the ACME server, and the order has transitioned - to the 'valid' state. - format: byte - type: string - failureTime: - description: FailureTime stores the time that this order failed. This - is used to influence garbage collection and back-off. - format: date-time - type: string - finalizeURL: - description: FinalizeURL of the Order. This is used to obtain certificates - for this order once it has been completed. 
- type: string - reason: - description: Reason optionally provides more information about a why - the order is in the current state. - type: string - state: - description: State contains the current state of this Order resource. - States 'success' and 'expired' are 'final' - enum: - - valid - - ready - - pending - - processing - - invalid - - expired - - errored - type: string - url: - description: URL of the Order. This will initially be empty when the - resource is first created. The Order controller will populate this - field when the Order is first processed. This field will be immutable - after it is initially set. - type: string - type: object - required: - - metadata - - spec - version: v1 - versions: - - name: v1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - -{{- end }} -{{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml b/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml index 778efb2a..80110d79 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/crds.yaml @@ -1,4 +1,3 @@ -{{- if (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} {{- if .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -22,6 +21,8 @@ spec: - cr - crs singular: certificaterequest + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -56,7 +57,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. 
\n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object properties: apiVersion: @@ -85,7 +86,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -162,14 +163,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. 
type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -198,7 +199,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object properties: apiVersion: @@ -227,7 +228,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. 
If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -304,14 +305,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -340,7 +341,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." 
+ description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object required: - spec @@ -367,7 +368,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -448,14 +449,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). 
+ description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -484,7 +485,7 @@ spec: type: date schema: openAPIV3Schema: - description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a 'one-shot' resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object required: - spec @@ -511,7 +512,7 @@ spec: description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this CertificateRequest. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. 
The 'name' field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to 'cert-manager.io' if empty. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -592,14 +593,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', 'InvalidRequest'). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`). type: string failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. @@ -636,6 +637,8 @@ spec: - cert - certs singular: certificate + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -719,7 +722,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. 
If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -734,19 +737,19 @@ spec: description: Name of the resource being referred to. type: string keyAlgorithm: - description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. type: string enum: - rsa - ecdsa keyEncoding: - description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then PKCS#1 will be used by default. + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. 
If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. type: string enum: - pkcs1 - pkcs8 keySize: - description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. type: integer keystores: description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. @@ -911,14 +914,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. 
@@ -1014,7 +1017,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1029,19 +1032,19 @@ spec: description: Name of the resource being referred to. type: string keyAlgorithm: - description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. type: string enum: - rsa - ecdsa keyEncoding: - description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. 
If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then PKCS#1 will be used by default. + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. type: string enum: - pkcs1 - pkcs8 keySize: - description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. type: integer keystores: description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. @@ -1055,7 +1058,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. @@ -1077,7 +1080,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. @@ -1206,14 +1209,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). 
type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1311,7 +1314,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1378,13 +1381,13 @@ spec: type: object properties: algorithm: - description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `algorithm` is specified and `size` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. 
If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. type: string enum: - RSA - ECDSA encoding: - description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. type: string enum: - PKCS1 @@ -1503,14 +1506,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1608,7 +1611,7 @@ spec: description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: IssuerRef is a reference to the issuer for this certificate. 
If the 'kind' field is not set, or set to 'Issuer', an Issuer resource with the given name in the same namespace as the Certificate will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the provided name will be used. The 'name' field in this stanza is required at all times. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -1634,7 +1637,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. @@ -1656,7 +1659,7 @@ spec: - passwordSecretRef properties: create: - description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. @@ -1675,13 +1678,13 @@ spec: type: object properties: algorithm: - description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either "rsa" or "ecdsa" If `algorithm` is specified and `size` is not provided, key size of 256 will be used for "ecdsa" key algorithm and key size of 2048 will be used for "rsa" key algorithm. + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. type: string enum: - RSA - ECDSA encoding: - description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. 
type: string enum: - PKCS1 @@ -1800,14 +1803,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready', `Issuing`). + description: Type of the condition, known values are (`Ready`, `Issuing`). type: string lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. @@ -1859,6 +1862,9 @@ spec: listKind: ChallengeList plural: challenges singular: challenge + categories: + - cert-manager + - cert-manager-acme scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -5200,6 +5206,8 @@ spec: listKind: ClusterIssuerList plural: clusterissuers singular: clusterissuer + categories: + - cert-manager scope: Cluster conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -6043,6 +6051,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
type: string @@ -6231,14 +6244,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -7072,6 +7085,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -7260,14 +7278,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -8103,6 +8121,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -8291,14 +8314,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -9134,6 +9157,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -9322,14 +9350,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). 
type: string served: true storage: true @@ -9359,6 +9387,8 @@ spec: listKind: IssuerList plural: issuers singular: issuer + categories: + - cert-manager scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. @@ -10202,6 +10232,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -10390,14 +10425,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -11231,6 +11266,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
type: string @@ -11419,14 +11459,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -12262,6 +12302,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -12450,14 +12495,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: false @@ -13293,6 +13338,11 @@ spec: type: array items: type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate wil be issued with no OCSP servers set. 
For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string secretName: description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string @@ -13481,14 +13531,14 @@ spec: description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: - description: Status of the condition, one of ('True', 'False', 'Unknown'). + description: Status of the condition, one of (`True`, `False`, `Unknown`). type: string enum: - "True" - "False" - Unknown type: - description: Type of the condition, known values are ('Ready'). + description: Type of the condition, known values are (`Ready`). type: string served: true storage: true @@ -13518,6 +13568,9 @@ spec: listKind: OrderList plural: orders singular: order + categories: + - cert-manager + - cert-manager-acme scope: Namespaced conversion: # a Webhook strategy instruct API server to call an external webhook for any conversion between custom resources. 
@@ -14169,4 +14222,3 @@ status: storedVersions: [] --- {{- end }} -{{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml b/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml index 3daeaff2..39a69185 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/deployment.yaml @@ -86,7 +86,18 @@ spec: {{- else }} - --cluster-resource-namespace=$(POD_NAMESPACE) {{- end }} - - --leader-election-namespace={{ .Values.global.leaderElection.namespace }} + {{- with .Values.global.leaderElection }} + - --leader-election-namespace={{ .namespace }} + {{- if .leaseDuration }} + - --leader-election-lease-duration={{ .leaseDuration }} + {{- end }} + {{- if .renewDeadline }} + - --leader-election-renew-deadline={{ .renewDeadline }} + {{- end }} + {{- if .retryPeriod }} + - --leader-election-retry-period={{ .retryPeriod }} + {{- end }} + {{- end }} {{- if .Values.extraArgs }} {{ toYaml .Values.extraArgs | indent 10 }} {{- end }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml b/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml index c7244595..a9b85392 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/rbac.yaml @@ -220,7 +220,7 @@ rules: - apiGroups: [""] resources: ["pods", "services"] verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses"] verbs: ["get", "list", "watch", "create", "delete", "update"] # We require the ability to specify a custom hostname when we are creating @@ -261,13 +261,13 @@ rules: - apiGroups: ["cert-manager.io"] resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] verbs: ["get", "list", "watch"] - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses"] verbs: ["get", 
"list", "watch"] # We require these rules to support users with the OwnerReferencesPermissionEnforcement # admission controller enabled: # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["extensions"] + - apiGroups: ["networking.k8s.io"] resources: ["ingresses/finalizers"] verbs: ["update"] - apiGroups: [""] diff --git a/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml index 56ff07b7..99309c0e 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/webhook-mutating-webhook.yaml @@ -1,10 +1,4 @@ -{{- $isV1AdmissionRegistration := false -}} -{{- if (or (not (.Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1")) (.Capabilities.APIVersions.Has "hacking-helm.i-wish-this-wasnt-required.cert-manager.io/force-v1beta1-webhooks") ) }} -apiVersion: admissionregistration.k8s.io/v1beta1 -{{- else }} -{{- $isV1AdmissionRegistration = true -}} apiVersion: admissionregistration.k8s.io/v1 -{{- end }} kind: MutatingWebhookConfiguration metadata: name: {{ include "webhook.fullname" . }} @@ -33,21 +27,12 @@ webhooks: - UPDATE resources: - "*/*" - {{- if $isV1AdmissionRegistration }} admissionReviewVersions: ["v1", "v1beta1"] timeoutSeconds: {{ .Values.webhook.timeoutSeconds }} - {{- end }} failurePolicy: Fail -{{- if (semverCompare ">=1.12-0" .Capabilities.KubeVersion.GitVersion) }} # Only include 'sideEffects' field in Kubernetes 1.12+ sideEffects: None -{{- end }} clientConfig: -{{- if (semverCompare "<=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Set caBundle to empty to avoid https://github.com/kubernetes/kubernetes/pull/70138 - # in Kubernetes 1.12 and below. - caBundle: "" -{{- end }} service: name: {{ template "webhook.fullname" . 
}} namespace: {{ .Release.Namespace | quote }} diff --git a/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml b/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml index c45461e0..64c8d73d 100644 --- a/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml +++ b/lib/common/bootstrap/charts/cert-manager/templates/webhook-validating-webhook.yaml @@ -1,10 +1,4 @@ -{{- $isV1AdmissionRegistration := false -}} -{{- if (or (not (.Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1")) (.Capabilities.APIVersions.Has "hacking-helm.i-wish-this-wasnt-required.cert-manager.io/force-v1beta1-webhooks") ) }} -apiVersion: admissionregistration.k8s.io/v1beta1 -{{- else }} -{{- $isV1AdmissionRegistration = true -}} apiVersion: admissionregistration.k8s.io/v1 -{{- end }} kind: ValidatingWebhookConfiguration metadata: name: {{ include "webhook.fullname" . }} @@ -43,21 +37,11 @@ webhooks: - UPDATE resources: - "*/*" - {{- if $isV1AdmissionRegistration }} admissionReviewVersions: ["v1", "v1beta1"] timeoutSeconds: {{ .Values.webhook.timeoutSeconds }} - {{- end }} failurePolicy: Fail -{{- if (semverCompare ">=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Only include 'sideEffects' field in Kubernetes 1.12+ sideEffects: None -{{- end }} clientConfig: -{{- if (semverCompare "<=1.12-0" .Capabilities.KubeVersion.GitVersion) }} - # Set caBundle to empty to avoid https://github.com/kubernetes/kubernetes/pull/70138 - # in Kubernetes 1.12 and below. - caBundle: "" -{{- end }} service: name: {{ template "webhook.fullname" . 
}} namespace: {{ .Release.Namespace | quote }} diff --git a/lib/common/bootstrap/charts/cert-manager/values.yaml b/lib/common/bootstrap/charts/cert-manager/values.yaml index e7f0f09f..9f5bb85c 100644 --- a/lib/common/bootstrap/charts/cert-manager/values.yaml +++ b/lib/common/bootstrap/charts/cert-manager/values.yaml @@ -24,6 +24,21 @@ global: # Override the namespace used to store the ConfigMap for leader election namespace: "kube-system" + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. This must be less than or equal to the + # lease duration. + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. 
+ # retryPeriod: 15s + installCRDs: false replicaCount: 1 diff --git a/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml b/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml index 07483cf9..73df2195 100644 --- a/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml +++ b/lib/common/bootstrap/charts/kube-prometheus-stack/values.yaml @@ -1377,7 +1377,7 @@ prometheusOperator: # Use certmanager to generate webhook certs certManager: - enabled: true + enabled: false # issuerRef: # name: "issuer" # kind: "ClusterIssuer" diff --git a/lib/common/services/mysql/Chart.lock b/lib/common/services/mysql/Chart.lock new file mode 100644 index 00000000..eb4df7fb --- /dev/null +++ b/lib/common/services/mysql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.1 +digest: sha256:1056dac8da880ed967a191e8d9eaf04766f77bda66a5715456d5dd4494a4a942 +generated: "2022-04-26T23:27:43.795807925Z" diff --git a/lib/common/services/mysql/Chart.yaml b/lib/common/services/mysql/Chart.yaml index 799cdc8d..f42519b3 100644 --- a/lib/common/services/mysql/Chart.yaml +++ b/lib/common/services/mysql/Chart.yaml @@ -1,19 +1,28 @@ -apiVersion: v1 -appVersion: 8.0.20 -description: Chart to create a Highly available MySQL cluster -engine: gotpl -home: https://mysql.com +annotations: + category: Database +apiVersion: v2 +appVersion: 8.0.29 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: MySQL is a fast, reliable, scalable, and easy to use open source relational + database system. Designed to handle mission-critical, heavy-load production applications. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/mysql icon: https://bitnami.com/assets/stacks/mysql/img/mysql-stack-220x234.png keywords: - mysql - database - sql - cluster -- high availablity +- high availability maintainers: - email: containers@bitnami.com name: Bitnami name: mysql sources: - https://github.com/bitnami/bitnami-docker-mysql -version: 6.14.2 +- https://mysql.com +version: 8.9.6 diff --git a/lib/common/services/mysql/README.md b/lib/common/services/mysql/README.md index 273102fb..e9618274 100644 --- a/lib/common/services/mysql/README.md +++ b/lib/common/services/mysql/README.md @@ -1,8 +1,14 @@ -# MySQL + -[MySQL](https://mysql.com) is a fast, reliable, scalable, and easy to use open-source relational database system. MySQL Server is intended for mission-critical, heavy-load production systems as well as for embedding into mass-deployed software. +# MySQL packaged by Bitnami -## TL;DR; +MySQL is a fast, reliable, scalable, and easy to use open source relational database system. Designed to handle mission-critical, heavy-load production applications. + +[Overview of MySQL](http://www.mysql.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR ```bash $ helm repo add bitnami https://charts.bitnami.com/bitnami @@ -11,14 +17,14 @@ $ helm install my-release bitnami/mysql ## Introduction -This chart bootstraps a [MySQL](https://github.com/bitnami/bitnami-docker-mysql) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +This chart bootstraps a [MySQL](https://github.com/bitnami/bitnami-docker-mysql) replication cluster deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. ## Prerequisites -- Kubernetes 1.12+ -- Helm 2.12+ or Helm 3.0-beta3+ +- Kubernetes 1.19+ +- Helm 3.2.0+ - PV provisioner support in the underlying infrastructure ## Installing the Chart @@ -46,141 +52,292 @@ The command removes all the Kubernetes components associated with the chart and ## Parameters -The following tables lists the configurable parameters of the MySQL chart and their default values. -| Parameter | Description | Default | | -|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `global.imageRegistry` | Global Docker image registry | `nil` | | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | | -| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | | -| `image.registry` | MySQL image registry | `docker.io` | | -| `image.repository` | MySQL Image name | `bitnami/mysql` | | -| `image.tag` | MySQL Image tag | `{TAG_NAME}` | | -| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | | -| `image.pullSecrets` | Specify docker-registry secret names 
as an array | `[]` (does not add image pull secrets to deployed pods) | | -| `image.debug` | Specify if debug logs should be enabled | `false` | | -| `nameOverride` | String to partially override mysql.fullname template with a string (will prepend the release name) | `nil` | | -| `fullnameOverride` | String to fully override mysql.fullname template with a string | `nil` | | -| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | | -| `volumePermissions.resources` | Init container resource requests/limit | `nil` | | -| `existingSecret` | Specify the name of an existing secret for password details (`root.password`, `db.password`, `replication.password` will be ignored and picked up from this secret). The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password`. | `nil` | | -| `root.password` | Password for the `root` user | _random 10 character alphanumeric string_ | | -| `root.forcePassword` | Force users to specify a password. 
That is required for 'helm upgrade' to work properly | `false` | | -| `root.injectSecretsAsVolume` | Mount admin user password as a file instead of using an environment variable | `false` | | -| `db.user` | Username of new user to create (should be different from replication.user) | `nil` | | -| `db.password` | Password for the new user | _random 10 character alphanumeric string if `db.user` is defined_ | | -| `db.name` | Name for new database to create | `my_database` | | -| `db.forcePassword` | Force users to specify a password. That is required for 'helm upgrade' to work properly | `false` | | -| `db.injectSecretsAsVolume` | Mount user password as a file instead of using an environment variable | `false` | | -| `replication.enabled` | MySQL replication enabled | `true` | | -| `replication.user` | MySQL replication user (should be different from db.user) | `replicator` | | -| `replication.password` | MySQL replication user password | _random 10 character alphanumeric string_ | | -| `replication.forcePassword` | Force users to specify a password. 
That is required for 'helm upgrade' to work properly | `false` | | -| `replication.injectSecretsAsVolume` | Mount user password as a file instead of using an environment variable | `false` | | -| `initdbScripts` | Dictionary of initdb scripts | `nil` | | -| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `nil` | | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | | -| `serviceAccount.name` | If serviceAccount.create is enabled, what should the serviceAccount name be - otherwise defaults to the fullname | `nil` | | -| `master.config` | Config file for the MySQL Master server | `_default values in the values.yaml file_` | | -| `master.updateStrategy.type` | Master statefulset update strategy policy | `RollingUpdate` | | -| `master.podAnnotations` | Pod annotations for master nodes | `{}` | | -| `master.affinity` | Map of node/pod affinities for master nodes | `{}` (The value is evaluated as a template) | | -| `master.nodeSelector` | Node labels for pod assignment on master nodes | `{}` (The value is evaluated as a template) | | -| `master.tolerations` | Tolerations for pod assignment on master nodes | `[]` (The value is evaluated as a template) | | -| `master.securityContext.enabled` | Enable security context for master nodes | `true` | | -| `master.securityContext.fsGroup` | Group ID for the master nodes' containers | `1001` | | -| `master.securityContext.runAsUser` | User ID for the master nodes' containers | `1001` | | -| `master.containerSecurityContext` | Container security context for master nodes' containers | `{}` | | -| `master.resources` | CPU/Memory resource requests/limits for master nodes' containers | `{}` | | -| `master.livenessProbe.enabled` | Turn on and off liveness probe (master nodes) | `true` | | -| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master nodes) | `120` | | -| `master.livenessProbe.periodSeconds` | How 
often to perform the probe (master nodes) | `10` | | -| `master.livenessProbe.timeoutSeconds` | When the probe times out (master nodes) | `1` | | -| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (master nodes) | `1` | | -| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (master nodes) | `3` | | -| `master.readinessProbe.enabled` | Turn on and off readiness probe (master nodes) | `true` | | -| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master nodes) | `30` | | -| `master.readinessProbe.periodSeconds` | How often to perform the probe (master nodes) | `10` | | -| `master.readinessProbe.timeoutSeconds` | When the probe times out (master nodes) | `1` | | -| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (master nodes) | `1` | | -| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (master nodes) | `3` | | -| `master.extraEnvVars` | Array containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.extraEnvVarsCM` | Configmap containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.extraEnvVarsSecret` | Secret containing extra env vars to configure MySQL master replicas | `nil` | | -| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` (master nodes) | `true` | | -| `master.persistence.mountPath` | Configure `PersistentVolumeClaim` mount path (master nodes) | `/bitnami/mysql` | | -| `master.persistence.annotations` | Persistent Volume Claim annotations (master nodes) | `{}` | | -| `master.persistence.storageClass` | Persistent Volume Storage Class (master nodes) | `` | | -| `master.persistence.accessModes` | Persistent Volume Access Modes (master nodes) | `[ReadWriteOnce]` | | -| `master.persistence.size` | Persistent Volume Size (master nodes) | `8Gi` | | -| `master.persistence.existingClaim` | 
Provide an existing `PersistentVolumeClaim` (master nodes) | `nil` | | -| `slave.replicas` | Desired number of slave replicas | `1` | | -| `slave.updateStrategy.type` | Slave statefulset update strategy policy | `RollingUpdate` | | -| `slave.podAnnotations` | Pod annotations for slave nodes | `{}` | | -| `slave.affinity` | Map of node/pod affinities for slave nodes | `{}` (The value is evaluated as a template) | | -| `slave.nodeSelector` | Node labels for pod assignment on slave nodes | `{}` (The value is evaluated as a template) | | -| `slave.tolerations` | Tolerations for pod assignment on slave nodes | `[]` (The value is evaluated as a template) | | -| `slave.extraEnvVars` | Array containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.extraEnvVarsCM` | ConfigMap containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.extraEnvVarsSecret` | Secret containing extra env vars to configure MySQL slave replicas | `nil` | | -| `slave.securityContext.enabled` | Enable security context for slave nodes | `true` | | -| `slave.securityContext.fsGroup` | Group ID for the slave nodes' containers | `1001` | | -| `slave.securityContext.runAsUser` | User ID for the slave nodes' containers | `1001` | | -| `slave.containerSecurityContext` | Container security context for slave nodes' containers | `{}` | | -| `slave.resources` | CPU/Memory resource requests/limits for slave nodes' containers | `{}` | | -| `slave.livenessProbe.enabled` | Turn on and off liveness probe (slave nodes) | `true` | | -| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (slave nodes) | `120` | | -| `slave.livenessProbe.periodSeconds` | How often to perform the probe (slave nodes) | `10` | | -| `slave.livenessProbe.timeoutSeconds` | When the probe times out (slave nodes) | `1` | | -| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe (slave nodes) | `1` | | -| 
`slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave nodes) | `3` | | -| `slave.readinessProbe.enabled` | Turn on and off readiness probe (slave nodes) | `true` | | -| `slave.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (slave nodes) | `30` | | -| `slave.readinessProbe.periodSeconds` | How often to perform the probe (slave nodes) | `10` | | -| `slave.readinessProbe.timeoutSeconds` | When the probe times out (slave nodes) | `1` | | -| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe (slave nodes) | `1` | | -| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe (slave nodes) | `3` | | -| `slave.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` (slave nodes) | `true` | | -| `slave.persistence.mountPath` | Configure `PersistentVolumeClaim` mount path (slave nodes) | `/bitnami/mysql` | | -| `slave.persistence.annotations` | Persistent Volume Claim annotations (slave nodes) | `{}` | | -| `slave.persistence.storageClass` | Persistent Volume Storage Class (slave nodes) | `` | | -| `slave.persistence.accessModes` | Persistent Volume Access Modes (slave nodes) | `[ReadWriteOnce]` | | -| `slave.persistence.size` | Persistent Volume Size (slave nodes) | `8Gi` | | -| `slave.persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (slave nodes) | `nil` | | -| `service.type` | Kubernetes service type | `ClusterIP` | | -| `service.port` | MySQL service port | `3306` | | -| `service.nodePort.master` | Port to bind to for NodePort service type (master service) | `nil` | | -| `service.nodePort.slave` | Port to bind to for NodePort service type (slave service) | `nil` | | -| `service.loadBalancerIP.master` | Static IP Address to use for master LoadBalancer service type | `nil` | | -| `service.loadBalancerIP.slave` | Static IP Address to use for slaves LoadBalancer service type | `nil` | | -| 
`service.annotations` | Kubernetes service annotations | `{}` | | -| `metrics.enabled` | Start a side-car prometheus exporter | `false` | | -| `metrics.image` | Exporter image name | `bitnami/mysqld-exporter` | | -| `metrics.imageTag` | Exporter image tag | `{TAG_NAME}` | | -| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` | | -| `metrics.resources` | Exporter resource requests/limit | `nil` | | -| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | | -| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | | -| `metrics.service.annotations` | Prometheus exporter svc annotations | `{prometheus.io/scrape: "true", prometheus.io/port: "9104"}` | | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | | -| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | | -| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | | -| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `nil` | The above parameters map to the env variables defined in [bitnami/mysql](http://github.com/bitnami/bitnami-docker-mysql). For more information please refer to the [bitnami/mysql](http://github.com/bitnami/bitnami-docker-mysql) image documentation. 
| +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Cluster domain | `cluster.local` | +| `commonAnnotations` | Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `commonLabels` | Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `extraDeploy` | Array with extra yaml to deploy with the chart. Evaluated as a template | `[]` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MySQL common parameters + +| Name | Description | Value | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | MySQL image registry | `docker.io` | +| `image.repository` | MySQL image repository | `bitnami/mysql` | +| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.29-debian-10-r0` | +| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `architecture` | MySQL architecture (`standalone` or `replication`) | `standalone` | +| `auth.rootPassword` | Password for the `root` user. Ignored if existing secret is provided | `""` | +| `auth.database` | Name for a custom database to create | `my_database` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the new user. Ignored if existing secret is provided | `""` | +| `auth.replicationUser` | MySQL replication user | `replicator` | +| `auth.replicationPassword` | MySQL replication user password. Ignored if existing secret is provided | `""` | +| `auth.existingSecret` | Use existing secret for password details. 
The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` | `""` | +| `auth.forcePassword` | Force users to specify required passwords | `false` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `auth.customPasswordFiles` | Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` | `{}` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | + + +### MySQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ------------------- | +| `primary.command` | Override default container command on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.args` | Override default container args on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.hostAliases` | Deployment pod host aliases | `[]` | +| `primary.configuration` | Configure MySQL Primary with a custom my.cnf file | `""` | +| `primary.existingConfigmap` | Name of existing ConfigMap with MySQL Primary configuration. | `""` | +| `primary.updateStrategy` | Update strategy type for the MySQL primary statefulset | `RollingUpdate` | +| `primary.rollingUpdatePartition` | Partition update strategy for MySQL Primary statefulset | `""` | +| `primary.podAnnotations` | Additional pod annotations for MySQL primary pods | `{}` | +| `primary.podAffinityPreset` | MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | MySQL primary pod anti-affinity preset. 
Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | MySQL primary node label key to match. Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | MySQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `primary.affinity` | Affinity for MySQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for MySQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for MySQL primary pods assignment | `[]` | +| `primary.podSecurityContext.enabled` | Enable security context for MySQL primary pods | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `primary.containerSecurityContext.enabled` | MySQL primary container securityContext | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the MySQL primary container | `1001` | +| `primary.resources.limits` | The resources limits for MySQL primary containers | `{}` | +| `primary.resources.requests` | The requested resources for MySQL primary containers | `{}` | +| `primary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe
| `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe | `true` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Override default liveness probe for MySQL primary containers | `{}` | +| `primary.customReadinessProbe` | Override default readiness probe for MySQL primary containers | `{}` | +| `primary.customStartupProbe` | Override default startup probe for MySQL primary containers | `{}` | +| `primary.extraFlags` | MySQL primary additional command line flags | `""` | +| `primary.extraEnvVars` | Extra environment variables to be set on MySQL primary containers | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL primary containers | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL primary containers | `""` | +| `primary.persistence.enabled` | Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. 
If false, use emptyDir | `true` | +| `primary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MySQL primary replicas | `""` | +| `primary.persistence.storageClass` | MySQL primary persistent volume storage Class | `""` | +| `primary.persistence.annotations` | MySQL primary persistent volume claim annotations | `{}` | +| `primary.persistence.accessModes` | MySQL primary persistent volume access Modes | `["ReadWriteOnce"]` | +| `primary.persistence.size` | MySQL primary persistent volume size | `8Gi` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL Primary pod(s) | `[]` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) | `[]` | +| `primary.initContainers` | Add additional init containers for the MySQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers for the MySQL Primary pod(s) | `[]` | +| `primary.service.type` | MySQL Primary K8s service type | `ClusterIP` | +| `primary.service.port` | MySQL Primary K8s service port | `3306` | +| `primary.service.nodePort` | MySQL Primary K8s service node port | `""` | +| `primary.service.clusterIP` | MySQL Primary K8s service clusterIP IP | `""` | +| `primary.service.loadBalancerIP` | MySQL Primary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL Primary service is LoadBalancer | `[]` | +| `primary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `primary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL primary pods | `false` | +| `primary.pdb.minAvailable` | Minimum number/percentage of MySQL primary 
pods that should remain scheduled | `1` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of MySQL primary pods that may be made unavailable | `""` | +| `primary.podLabels` | MySQL Primary pod label. If labels are same as commonLabels , this will take precedence | `{}` | + + +### MySQL Secondary parameters + +| Name | Description | Value | +| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `secondary.replicaCount` | Number of MySQL secondary replicas | `1` | +| `secondary.hostAliases` | Deployment pod host aliases | `[]` | +| `secondary.command` | Override default container command on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.args` | Override default container args on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.configuration` | Configure MySQL Secondary with a custom my.cnf file | `""` | +| `secondary.existingConfigmap` | Name of existing ConfigMap with MySQL Secondary configuration. | `""` | +| `secondary.updateStrategy` | Update strategy type for the MySQL secondary statefulset | `RollingUpdate` | +| `secondary.rollingUpdatePartition` | Partition update strategy for MySQL Secondary statefulset | `""` | +| `secondary.podAnnotations` | Additional pod annotations for MySQL secondary pods | `{}` | +| `secondary.podAffinityPreset` | MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.podAntiAffinityPreset` | MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `secondary.nodeAffinityPreset.type` | MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `secondary.nodeAffinityPreset.key` | MySQL secondary node label key to match. Ignored if `secondary.affinity` is set. | `""` | +| `secondary.nodeAffinityPreset.values` | MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. | `[]` | +| `secondary.affinity` | Affinity for MySQL secondary pods assignment | `{}` | +| `secondary.nodeSelector` | Node labels for MySQL secondary pods assignment | `{}` | +| `secondary.tolerations` | Tolerations for MySQL secondary pods assignment | `[]` | +| `secondary.podSecurityContext.enabled` | Enable security context for MySQL secondary pods | `true` | +| `secondary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `secondary.containerSecurityContext.enabled` | MySQL secondary container securityContext | `true` | +| `secondary.containerSecurityContext.runAsUser` | User ID for the MySQL secondary container | `1001` | +| `secondary.resources.limits` | The resources limits for MySQL secondary containers | `{}` | +| `secondary.resources.requests` | The requested resources for MySQL secondary containers | `{}` | +| `secondary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `secondary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `secondary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `secondary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `secondary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `secondary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `secondary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `secondary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `secondary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +|
`secondary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `secondary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `secondary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `secondary.startupProbe.enabled` | Enable startupProbe | `true` | +| `secondary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `secondary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `secondary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `secondary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `secondary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `secondary.customLivenessProbe` | Override default liveness probe for MySQL secondary containers | `{}` | +| `secondary.customReadinessProbe` | Override default readiness probe for MySQL secondary containers | `{}` | +| `secondary.customStartupProbe` | Override default startup probe for MySQL secondary containers | `{}` | +| `secondary.extraFlags` | MySQL secondary additional command line flags | `""` | +| `secondary.extraEnvVars` | An array to add extra environment variables on MySQL secondary containers | `[]` | +| `secondary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL secondary containers | `""` | +| `secondary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL secondary containers | `""` | +| `secondary.persistence.enabled` | Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` | `true` | +| `secondary.persistence.storageClass` | MySQL secondary persistent volume storage Class | `""` | +| `secondary.persistence.annotations` | MySQL secondary persistent volume claim annotations | `{}` | +| `secondary.persistence.accessModes` | MySQL secondary persistent volume access Modes | 
`["ReadWriteOnce"]` | +| `secondary.persistence.size` | MySQL secondary persistent volume size | `8Gi` | +| `secondary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `secondary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL secondary pod(s) | `[]` | +| `secondary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) | `[]` | +| `secondary.initContainers` | Add additional init containers for the MySQL secondary pod(s) | `[]` | +| `secondary.sidecars` | Add additional sidecar containers for the MySQL secondary pod(s) | `[]` | +| `secondary.service.type` | MySQL secondary Kubernetes service type | `ClusterIP` | +| `secondary.service.port` | MySQL secondary Kubernetes service port | `3306` | +| `secondary.service.nodePort` | MySQL secondary Kubernetes service node port | `""` | +| `secondary.service.clusterIP` | MySQL secondary Kubernetes service clusterIP IP | `""` | +| `secondary.service.loadBalancerIP` | MySQL secondary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `secondary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `secondary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL secondary service is LoadBalancer | `[]` | +| `secondary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `secondary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL secondary pods | `false` | +| `secondary.pdb.minAvailable` | Minimum number/percentage of MySQL secondary pods that should remain scheduled | `1` | +| `secondary.pdb.maxUnavailable` | Maximum number/percentage of MySQL secondary pods that may be made unavailable | `""` | +| `secondary.podLabels` | Additional pod labels for MySQL secondary pods | `{}` | + + +### RBAC parameters + +| Name | Description | Value | +| ---------------------------- | 
------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MySQL pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.annotations` | Annotations for MySQL Service Account | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Network Policy + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | The Policy model to apply. | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r408` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources` | Init container 
volume-permissions resources | `{}` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-10-r52` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | +| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | +| `metrics.service.annotations` | Prometheus exporter service annotations | `{}` | +| `metrics.extraArgs.primary` | Extra args to be passed to mysqld_exporter on Primary pods | `[]` | +| `metrics.extraArgs.secondary` | Extra args to be passed to mysqld_exporter on Secondary pods | `[]` | +| `metrics.resources.limits` | The resources limits for MySQL prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for MySQL prometheus exporter containers | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| 
`metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | + + +The above parameters map to the env variables defined in [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql). For more information please refer to the [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql) image documentation. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, ```bash $ helm install my-release \ - --set root.password=secretpassword,user.database=app_database \ + --set auth.rootPassword=secretpassword,auth.database=app_database \ bitnami/mysql ``` The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a database named `app_database`. +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```bash @@ -197,64 +354,115 @@ It is strongly recommended to use immutable tags in a production environment. Th Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. -### Production configuration +### Use a different MySQL version -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/change-image-version/). 
-- Force users to specify a password: -```diff -- root.forcePassword: false -+ root.forcePassword: true +### Customize a new MySQL instance -- db.forcePassword: false -+ db.forcePassword: true - -- replication.forcePassword: false -+ replication.forcePassword: true -``` - -- Desired number of slave replicas: -```diff -- slave.replicas: 1 -+ slave.replicas: 2 -``` - -- Start a side-car prometheus exporter: -```diff -- metrics.enabled: false -+ metrics.enabled: true -``` - -### Initialize a fresh instance - -The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. +The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image allows you to use your custom scripts to initialize a fresh instance. Custom scripts may be specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and the ConfigMap passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter. The allowed extensions are `.sh`, `.sql` and `.sql.gz`. +These scripts are treated differently depending on their extension. While `.sh` scripts are executed on all the nodes, `.sql` and `.sql.gz` scripts are only executed on the primary nodes. This is because `.sh` scripts support conditional tests to identify the type of node they are running on, while such tests are not supported in `.sql` or `.sql.gz` files. + +Refer to the [chart documentation for more information and a usage example](http://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/customize-new-instance/).
+ +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as MySQL, you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + ## Persistence The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can be defined. +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. -### Adjust permissions of persistent volume mountpoint +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). -As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. +## Network Policy -By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. 
-As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. +To enable network policy for MySQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. -You can enable this initContainer by setting `volumePermissions.enabled` to `true`. +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 3306. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to MySQL. +This label will be displayed in the output of a successful install. + +## Pod affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). 
## Upgrading -It's necessary to set the `root.password` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart: +It's necessary to set the `auth.rootPassword` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart: ```bash -$ helm upgrade my-release bitnami/mysql --set root.password=[ROOT_PASSWORD] +$ helm upgrade my-release bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] ``` -| Note: you need to substitue the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes. +| Note: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes. + +### To 8.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - The terms *master* and *slave* have been replaced by the terms *primary* and *secondary*. Therefore, parameters prefixed with `master` or `slave` are now prefixed with `primary` or `secondary`, respectively. + - Credentials parameters are reorganized under the `auth` parameter. + - `replication.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`. +- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels). +- This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. 
More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MySQL chart, and migrate the data from your previous release. You have 2 alternatives to do so: + - Create a backup of the database, and restore it on the new release using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html). + - Reuse the PVC used to hold the master data on your previous release. To do so, use the `primary.persistence.existingClaim` parameter. The following example assumes that the release name is `mysql`: + +```bash +$ helm install mysql bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] --set primary.persistence.existingClaim=[EXISTING_PVC] +``` + +| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[ROOT_PASSWORD]_ with the root password used in your previous release. + +### To 7.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mysql/administration/upgrade-helm3/). ### To 3.0.0 @@ -265,3 +473,19 @@ Use the workaround below to upgrade from versions previous to 3.0.0. 
The followi $ kubectl delete statefulset mysql-master --cascade=false $ kubectl delete statefulset mysql-slave --cascade=false ``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/lib/common/services/mysql/charts/common/.helmignore b/lib/common/services/mysql/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/lib/common/services/mysql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/lib/common/services/mysql/charts/common/Chart.yaml b/lib/common/services/mysql/charts/common/Chart.yaml new file mode 100644 index 00000000..e8d2db9d --- /dev/null +++ b/lib/common/services/mysql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.13.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.13.1 diff --git a/lib/common/services/mysql/charts/common/README.md b/lib/common/services/mysql/charts/common/README.md new file mode 100644 index 00000000..88d13b1d --- /dev/null +++ b/lib/common/services/mysql/charts/common/README.md @@ -0,0 +1,347 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. 
| `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and 
`svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|--------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. 
| `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. 
| +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+ type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. 
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
diff --git a/lib/common/services/mysql/charts/common/templates/_affinities.tpl b/lib/common/services/mysql/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..189ea403 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_capabilities.tpl b/lib/common/services/mysql/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..4ec8321e --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. 
+*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_errors.tpl b/lib/common/services/mysql/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..a79cc2e3 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_images.tpl b/lib/common/services/mysql/charts/common/templates/_images.tpl new file mode 100644 index 00000000..42ffbc72 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_ingress.tpl b/lib/common/services/mysql/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..8caf73a6 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_labels.tpl b/lib/common/services/mysql/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..252066c7 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_names.tpl b/lib/common/services/mysql/charts/common/templates/_names.tpl new file mode 100644 index 00000000..c8574d17 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/lib/common/services/mysql/charts/common/templates/_secrets.tpl b/lib/common/services/mysql/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a53fb44f --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = 
regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_storage.tpl b/lib/common/services/mysql/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..60e2a844 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl b/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..2db16685 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_tplvalues.tpl 
@@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_utils.tpl b/lib/common/services/mysql/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..ea083a24 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/_warnings.tpl b/lib/common/services/mysql/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..ae10fa41 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl b/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..ded1ae3b --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl b/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..b6906ff7 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . 
-}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl b/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..a071ea4d --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. 
+ +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- 
$requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl b/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..164ec0d0 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl b/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..5d72959b --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl b/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..9a814cf4 --- /dev/null +++ b/lib/common/services/mysql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/lib/common/services/mysql/charts/common/values.yaml b/lib/common/services/mysql/charts/common/values.yaml new file mode 100644 index 00000000..f2df68e5 --- /dev/null +++ b/lib/common/services/mysql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/lib/common/services/mysql/ci/values-production-with-rbac.yaml b/lib/common/services/mysql/ci/values-production-with-rbac.yaml new file mode 100644 index 00000000..d3370c93 --- /dev/null +++ b/lib/common/services/mysql/ci/values-production-with-rbac.yaml @@ -0,0 +1,30 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct + +architecture: replication +auth: + usePasswordFiles: true + +primary: + extraEnvVars: + - name: TEST + value: "3" + podDisruptionBudget: + create: true + +secondary: + replicaCount: 2 + extraEnvVars: + - name: TEST + value: "2" + podDisruptionBudget: + create: true + +serviceAccount: + create: true + name: mysql-service-account +rbac: + create: true + +metrics: + enabled: true diff --git a/lib/common/services/mysql/ci/values-production.yaml b/lib/common/services/mysql/ci/values-production.yaml deleted file mode 100644 index 072fd062..00000000 --- a/lib/common/services/mysql/ci/values-production.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Test values file for generating all of the yaml and check that -# the rendering is correct - -volumePermissions: - enabled: true - -master: - extraEnvVars: - - name: TEST - value: "3" - - extraEnvVarsSecret: example-secret - extraEnvVarsCM: example-cm - -slave: - extraEnvVars: - - name: TEST - value: "2" - - extraEnvVarsSecret: example-secret-2 - extraEnvVarsCM: example-cm-2 - replicas: 2 - -metrics: - 
enabled: true - ## Kubeval doesn't recognise ServiceMonitor as a valid K8s object - # serviceMonitor: - # enabled: true - diff --git a/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md b/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md deleted file mode 100644 index c7257d74..00000000 --- a/lib/common/services/mysql/files/docker-entrypoint-initdb.d/README.md +++ /dev/null @@ -1,3 +0,0 @@ -You can copy here your custom .sh, .sql or .sql.gz file so they are executed during the first boot of the image. - -More info in the [bitnami-docker-mysql](https://github.com/bitnami/bitnami-docker-mysql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/lib/common/services/mysql/templates/_helpers.tpl b/lib/common/services/mysql/templates/_helpers.tpl index bc5933ff..98b23466 100644 --- a/lib/common/services/mysql/templates/_helpers.tpl +++ b/lib/common/services/mysql/templates/_helpers.tpl @@ -1,97 +1,43 @@ {{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "mysql.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "mysql.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- define "mysql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} +{{- printf "%s-%s" (include "common.names.fullname" .) "primary" | trunc 63 | trimSuffix "-" -}} {{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} +{{- include "common.names.fullname" . 
-}} {{- end -}} {{- end -}} -{{- define "mysql.master.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- printf "%s-%s" .Values.fullnameOverride "master" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name "master" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name "master" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "mysql.slave.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- printf "%s-%s" .Values.fullnameOverride "slave" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name "slave" | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name "slave" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "mysql.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "mysql.labels" -}} -app: {{ include "mysql.name" . }} -chart: {{ include "mysql.chart" . }} -release: {{ .Release.Name }} -heritage: {{ .Release.Service }} -{{- end -}} - -{{/* -Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector -*/}} -{{- define "mysql.matchLabels" -}} -app: {{ include "mysql.name" . }} -release: {{ .Release.Name }} +{{- define "mysql.secondary.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
"secondary" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Return the proper MySQL image name */}} {{- define "mysql.image" -}} -{{- $registryName := .Values.image.registry -}} -{{- $repositoryName := .Values.image.repository -}} -{{- $tag := .Values.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} {{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mysql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mysql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mysql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} {{- end -}} {{ template "mysql.initdbScriptsCM" . }} @@ -100,216 +46,12 @@ Get the initialization scripts ConfigMap name. 
*/}} {{- define "mysql.initdbScriptsCM" -}} {{- if .Values.initdbScriptsConfigMap -}} -{{- printf "%s" .Values.initdbScriptsConfigMap -}} + {{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} {{- else -}} -{{- printf "%s-init-scripts" (include "mysql.master.fullname" .) -}} + {{- printf "%s-init-scripts" (include "mysql.primary.fullname" .) -}} {{- end -}} {{- end -}} -{{/* -Return the proper MySQL metrics exporter image name -*/}} -{{- define "mysql.metrics.image" -}} -{{- $registryName := .Values.metrics.image.registry -}} -{{- $repositoryName := .Values.metrics.image.repository -}} -{{- $tag := .Values.metrics.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "mysql.imagePullSecrets" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -Also, we can not use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . 
}} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.metrics.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "mysql.volumePermissions.image" -}} -{{- $registryName := .Values.volumePermissions.image.registry -}} -{{- $repositoryName := .Values.volumePermissions.image.repository -}} -{{- $tag := .Values.volumePermissions.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class for the master -*/}} -{{- define "mysql.master.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
-*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.master.persistence.storageClass -}} - {{- if (eq "-" .Values.master.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Storage Class for the slave -*/}} -{{- define "mysql.slave.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
-*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- else -}} - {{- if .Values.slave.persistence.storageClass -}} - {{- if (eq "-" .Values.slave.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Renders a value that contains template. -Usage: -{{ include "mysql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "mysql.tplValue" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} - -{{/* -Compile all warnings into a single message, and call fail. -*/}} -{{- define "mysql.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "mysql.validateValues.loadBalancerIPareNotEquals" .) 
-}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} -{{- end -}} -{{- end -}} - -{{/* Validate values of MySql - must provide different IPs */}} -{{- define "mysql.validateValues.loadBalancerIPareNotEquals" -}} -{{- if not (empty .Values.service.loadBalancerIP) -}} -{{- if eq (.Values.service.loadBalancerIP.master | quote) (.Values.service.loadBalancerIP.slave | quote) }} -mysql: service.loadBalancerIP - loadBalancerIP.master is equal to loadBalancerIP.slave which is not possible. - Please set a different ip for master and slave services. -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* Check if there are rolling tags in the images */}} -{{- define "mysql.checkRollingTags" -}} -{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end -}} -{{- end -}} - - {{/* Returns the proper service account name depending if an explicit service account name is set in the values file. If the name is not set it will default to either mysql.fullname if serviceAccount.create @@ -317,15 +59,134 @@ WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.t */}} {{- define "mysql.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "mysql.fullname" .) .Values.serviceAccount.name }} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} {{/* -Returns chart secret name. 
If existingSecret is not set it will default to mysql.fullname +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.primary.createConfigmap" -}} +{{- if and .Values.primary.configuration (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.secondary.configmapName" -}} +{{- if .Values.secondary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.secondary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.secondary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.secondary.createConfigmap" -}} +{{- if and (eq .Values.architecture "replication") .Values.secondary.configuration (not .Values.secondary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MySQL credentials */}} {{- define "mysql.secretName" -}} -{{ default (include "mysql.fullname" .) .Values.existingSecret }} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MySQL +*/}} +{{- define "mysql.createSecret" -}} +{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{- define "mysql.root.password" -}} + {{- if not (empty .Values.auth.rootPassword) }} + {{- .Values.auth.rootPassword }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-root-password") }} + {{- else }} + {{- required "A MySQL Root Password is required!" .Values.auth.rootPassword }} + {{- end }} +{{- end -}} + +{{- define "mysql.password" -}} + {{- if and (not (empty .Values.auth.username)) (not (empty .Values.auth.password)) }} + {{- .Values.auth.password }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-password") }} + {{- else }} + {{- required "A MySQL Database Password is required!" .Values.auth.password }} + {{- end }} +{{- end -}} + +{{- define "mysql.replication.password" -}} + {{- if not (empty .Values.auth.replicationPassword) }} + {{- .Values.auth.replicationPassword }} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "mysql-replication-password") }} + {{- else }} + {{- required "A MySQL Replication Password is required!" .Values.auth.replicationPassword }} + {{- end }} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "mysql.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mysql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} {{- end -}} diff --git a/lib/common/services/mysql/templates/extra-list.yaml b/lib/common/services/mysql/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/lib/common/services/mysql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/lib/common/services/mysql/templates/initialization-configmap.yaml b/lib/common/services/mysql/templates/initialization-configmap.yaml deleted file mode 100644 index 6bf5689b..00000000 --- a/lib/common/services/mysql/templates/initialization-configmap.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.master.fullname" . }}-init-scripts - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz") (not .Values.initdbScriptsConfigMap) }} -binaryData: -{{- $root := . 
}} -{{- range $path, $bytes := .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} - {{ base $path }}: {{ $root.Files.Get $path | b64enc | quote }} -{{- end }} -{{- end }} -data: -{{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}") (not .Values.initdbScriptsConfigMap) }} -{{ (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}").AsConfig | indent 2 }} -{{- end }} -{{- with .Values.initdbScripts }} -{{ toYaml . | indent 2 }} -{{- end }} -{{- end }} diff --git a/lib/common/services/mysql/templates/master-configmap.yaml b/lib/common/services/mysql/templates/master-configmap.yaml deleted file mode 100644 index fdbbfe6f..00000000 --- a/lib/common/services/mysql/templates/master-configmap.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.master.config }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.master.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -data: - my.cnf: |- -{{ .Values.master.config | indent 4 }} -{{- end -}} diff --git a/lib/common/services/mysql/templates/master-statefulset.yaml b/lib/common/services/mysql/templates/master-statefulset.yaml deleted file mode 100644 index eb31d015..00000000 --- a/lib/common/services/mysql/templates/master-statefulset.yaml +++ /dev/null @@ -1,293 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "mysql.master.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: master -{{- if .Values.commonLabels }} - {{- toYaml .Values.commonLabels | nindent 4 }} -{{- end }} -spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: master - serviceName: {{ template "mysql.master.fullname" . }} - replicas: 1 - updateStrategy: - type: {{ .Values.master.updateStrategy.type }} - {{- if (eq "Recreate" .Values.master.updateStrategy.type) }} - rollingUpdate: null - {{- end }} - template: - metadata: - labels: {{- include "mysql.labels" . 
| nindent 8 }} - component: master -{{- if .Values.commonLabels }} - {{- toYaml .Values.commonLabels | nindent 8 }} -{{- end }} - {{- if .Values.master.podAnnotations }} - annotations: {{ include "mysql.tplValue" ( dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} - {{- end }} - spec: -{{- include "mysql.imagePullSecrets" . | indent 6 }} - {{- if .Values.master.affinity }} - affinity: {{- include "mysql.tplValue" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.nodeSelector }} - nodeSelector: {{- include "mysql.tplValue" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: {{- include "mysql.tplValue" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.master.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.master.securityContext.fsGroup }} - runAsUser: {{ .Values.master.securityContext.runAsUser }} - {{- end }} - serviceAccountName: {{ template "mysql.serviceAccountName" . }} - {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} - initContainers: - - name: volume-permissions - image: {{ template "mysql.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: - - /bin/bash - - -ec - - | - chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} {{ .Values.master.persistence.mountPath }} - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.master.persistence.mountPath }} - {{- end }} - containers: - - name: mysql - image: {{ template "mysql.image" . 
}} - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - {{- if .Values.master.containerSecurityContext }} - securityContext: {{- toYaml .Values.master.containerSecurityContext | nindent 12 }} - {{- end }} - env: - - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" .Values.image.debug | quote }} - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-root-password - {{- end }} - {{- if .Values.db.user }} - - name: MYSQL_USER - value: {{ .Values.db.user | quote }} - {{- if .Values.db.injectSecretsAsVolume }} - - name: MYSQL_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-password" - {{- else }} - - name: MYSQL_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-password - {{- end }} - {{- end }} - - name: MYSQL_DATABASE - value: {{ .Values.db.name | quote }} - {{- if .Values.replication.enabled }} - - name: MYSQL_REPLICATION_MODE - value: "master" - - name: MYSQL_REPLICATION_USER - value: {{ .Values.replication.user | quote }} - {{- if .Values.replication.injectSecretsAsVolume }} - - name: MYSQL_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-replication-password" - {{- else }} - - name: MYSQL_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-replication-password - {{- end }} - {{- end }} - {{- if .Values.master.extraEnvVars }} - {{- include "mysql.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} - envFrom: - {{- if .Values.master.extraEnvVarsCM }} - - configMapRef: - name: {{ .Values.master.extraEnvVarsCM }} - {{- end }} - {{- if .Values.master.extraEnvVarsSecret }} - - secretRef: - name: {{ .Values.master.extraEnvVarsSecret }} - {{- end }} - {{- end }} - ports: - - name: mysql - containerPort: 3306 - {{- if .Values.master.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.master.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.resources }} - resources: {{- toYaml 
.Values.master.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.master.persistence.mountPath }} - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - {{- end }} - {{- if .Values.master.config }} - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - mountPath: /opt/bitnami/mysql/secrets/ - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "mysql.metrics.image" . }} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - env: - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-root-password - {{- end }} - command: - - /bin/sh - - -c - - | - password_aux="${MYSQL_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_ROOT_PASSWORD_FILE) - fi - DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter - ports: - - name: metrics - containerPort: 9104 - livenessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 15 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 5 - timeoutSeconds: 1 - {{- if .Values.metrics.resources }} - resources: {{- toYaml .Values.metrics.resources | nindent 12 }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume }} - volumeMounts: - - name: mysql-credentials - mountPath: /opt/bitnami/mysqld-exporter/secrets/ - {{- end }} - {{- end }} - volumes: - {{- if .Values.master.config }} - - name: config - configMap: - name: {{ template "mysql.master.fullname" . }} - {{- end }} - {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} - - name: custom-init-scripts - configMap: - name: {{ template "mysql.initdbScriptsCM" . }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - secret: - secretName: {{ template "mysql.fullname" . 
}} - items: - {{- if .Values.db.injectSecretsAsVolume }} - - key: mysql-password - path: mysql-password - {{- end }} - {{- if .Values.root.injectSecretsAsVolume }} - - key: mysql-root-password - path: mysql-root-password - {{- end }} - {{- if .Values.replication.injectSecretsAsVolume }} - - key: mysql-replication-password - path: mysql-replication-password - {{- end }} - {{- end }} -{{- if not .Values.master.persistence.enabled }} - - name: "data" - emptyDir: {} -{{- else if and .Values.master.persistence.enabled .Values.master.persistence.existingClaim }} - - name: "data" - persistentVolumeClaim: - claimName: {{ .Values.master.persistence.existingClaim }} -{{- else if and .Values.master.persistence.enabled (not .Values.master.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - labels: - app: {{ template "mysql.name" . }} - component: master - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.master.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.master.persistence.size | quote }} - {{ include "mysql.master.storageClass" . }} -{{- end }} diff --git a/lib/common/services/mysql/templates/master-svc.yaml b/lib/common/services/mysql/templates/master-svc.yaml deleted file mode 100644 index 790f2df5..00000000 --- a/lib/common/services/mysql/templates/master-svc.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.service.name }} - labels: {{- include "mysql.labels" . 
| nindent 4 }} - component: master - {{- if or .Values.service.annotations .Values.metrics.service.annotations }} - annotations: - {{- if .Values.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.metrics.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} - {{- if not (empty .Values.service.loadBalancerIP.master) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP.master }} - {{- end }} - {{- end }} - ports: - - name: mysql - port: {{ .Values.service.port }} - targetPort: mysql - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} - nodePort: {{ .Values.service.nodePort.master }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - port: {{ .Values.metrics.service.port }} - targetPort: metrics - {{- end }} - selector: {{- include "mysql.matchLabels" . | nindent 4 }} - component: master diff --git a/lib/common/services/mysql/templates/metrics-svc.yaml b/lib/common/services/mysql/templates/metrics-svc.yaml new file mode 100644 index 00000000..fb0d9d76 --- /dev/null +++ b/lib/common/services/mysql/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + protocol: TCP + name: metrics + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} +{{- end }} diff --git a/lib/common/services/mysql/templates/networkpolicy.yaml b/lib/common/services/mysql/templates/networkpolicy.yaml new file mode 100644 index 00000000..a0d1d01d --- /dev/null +++ b/lib/common/services/mysql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.primary.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes + - ports: + - port: 9104 + {{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/primary/configmap.yaml b/lib/common/services/mysql/templates/primary/configmap.yaml new file mode 100644 index 00000000..540b7b90 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{ .Values.primary.configuration | nindent 4 }} +{{- end -}} diff --git a/lib/common/services/mysql/templates/primary/initialization-configmap.yaml b/lib/common/services/mysql/templates/primary/initialization-configmap.yaml new file mode 100644 index 00000000..83cbaea7 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mysql.primary.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{ end }} diff --git a/lib/common/services/mysql/templates/primary/pdb.yaml b/lib/common/services/mysql/templates/primary/pdb.yaml new file mode 100644 index 00000000..106ad520 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.primary.pdb.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.primary.pdb.minAvailable }} + minAvailable: {{ .Values.primary.pdb.minAvailable }} + {{- end }} + {{- if .Values.primary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.primary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/lib/common/services/mysql/templates/primary/statefulset.yaml b/lib/common/services/mysql/templates/primary/statefulset.yaml new file mode 100644 index 00000000..6f9c99ea --- /dev/null +++ b/lib/common/services/mysql/templates/primary/statefulset.yaml @@ -0,0 +1,368 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + serviceName: {{ include "mysql.primary.fullname" . }} + updateStrategy: + type: {{ .Values.primary.updateStrategy }} + {{- if (eq "Recreate" .Values.primary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.primary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.primary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mysql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mysql.serviceAccountName" . }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if 
.Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.primary.initContainers (and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled) }} + initContainers: + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /bitnami/mysql + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- end }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if not (empty .Values.auth.username) }} + - name: MYSQL_USER + value: {{ .Values.auth.username | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-password" .Values.auth.customPasswordFiles.user }} + {{- else }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-password + {{- end }} + {{- end }} + - name: MYSQL_DATABASE + value: {{ .Values.auth.database | quote }} + {{- if eq .Values.architecture "replication" }} + - name: MYSQL_REPLICATION_MODE + value: "master" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.primary.extraFlags }}" + {{- end }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.primary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 
}} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.primary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.startupProbe.enabled }} + startupProbe: {{- omit .Values.primary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{ toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts 
"context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.primary }} {{ . 
}} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "mysql.initdbScriptsCM" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ include "mysql.secretName" . 
}} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-password + path: mysql-password + {{- if eq .Values.architecture "replication" }} + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim . }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if and .Values.primary.persistence.enabled (not .Values.primary.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: primary + {{- if .Values.primary.persistence.annotations }} + annotations: + {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/lib/common/services/mysql/templates/primary/svc-headless.yaml b/lib/common/services/mysql/templates/primary/svc-headless.yaml new file mode 100644 index 00000000..49e6e579 --- /dev/null +++ b/lib/common/services/mysql/templates/primary/svc-headless.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/lib/common/services/mysql/templates/primary/svc.yaml b/lib/common/services/mysql/templates/primary/svc.yaml new file mode 100644 index 00000000..b46e6faa --- /dev/null +++ b/lib/common/services/mysql/templates/primary/svc.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) .Values.primary.service.nodePort) }} + nodePort: {{ .Values.primary.service.nodePort }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: primary diff --git a/lib/common/services/mysql/templates/role.yaml b/lib/common/services/mysql/templates/role.yaml new file mode 100644 index 00000000..4cbdd5c9 --- /dev/null +++ b/lib/common/services/mysql/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- end }} diff --git a/lib/common/services/mysql/templates/rolebinding.yaml b/lib/common/services/mysql/templates/rolebinding.yaml new file mode 100644 index 00000000..90ede32f --- /dev/null +++ b/lib/common/services/mysql/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "mysql.serviceAccountName" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . -}} +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/configmap.yaml b/lib/common/services/mysql/templates/secondary/configmap.yaml new file mode 100644 index 00000000..682e3e19 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.secondary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{ .Values.secondary.configuration | nindent 4 }} +{{- end -}} diff --git a/lib/common/services/mysql/templates/secondary/pdb.yaml b/lib/common/services/mysql/templates/secondary/pdb.yaml new file mode 100644 index 00000000..49c7e167 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replication") .Values.secondary.pdb.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.secondary.pdb.minAvailable }} + minAvailable: {{ .Values.secondary.pdb.minAvailable }} + {{- end }} + {{- if .Values.secondary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.secondary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/statefulset.yaml b/lib/common/services/mysql/templates/secondary/statefulset.yaml new file mode 100644 index 00000000..ef196ebf --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/statefulset.yaml @@ -0,0 +1,338 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.secondary.replicaCount }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary + serviceName: {{ include "mysql.secondary.fullname" . }} + updateStrategy: + type: {{ .Values.secondary.updateStrategy }} + {{- if (eq "Recreate" .Values.secondary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.secondary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.secondary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.secondary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/secondary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.secondary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mysql.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.secondary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mysql.serviceAccountName" . }} + {{- if .Values.secondary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAntiAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.secondary.nodeAffinityPreset.type "key" .Values.secondary.nodeAffinityPreset.key "values" .Values.secondary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.secondary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.secondary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.secondary.initContainers (and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled) }} + initContainers: + {{- if .Values.secondary.initContainers }} + {{- include 
"common.tplvalues.render" (dict "value" .Values.secondary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }} /bitnami/mysql + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- end }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.secondary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.secondary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.secondary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MYSQL_REPLICATION_MODE + value: "slave" + 
- name: MYSQL_MASTER_HOST + value: {{ include "mysql.primary.fullname" . }} + - name: MYSQL_MASTER_PORT_NUMBER + value: {{ .Values.primary.service.port | quote }} + - name: MYSQL_MASTER_ROOT_USER + value: "root" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.secondary.extraFlags }}" + {{- end }} + {{- if .Values.secondary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.secondary.extraEnvVarsCM .Values.secondary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.secondary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.secondary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.secondary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.secondary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.secondary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.secondary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f 
"${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.secondary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.startupProbe.enabled }} + startupProbe: {{- omit .Values.secondary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.secondary.resources }} + resources: {{ toYaml .Values.secondary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles 
(not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.secondary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.secondary }} {{ . 
}} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.secondary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.secondary.configmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ template "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.secondary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . 
| nindent 10 }} + app.kubernetes.io/component: secondary + {{- if .Values.secondary.persistence.annotations }} + annotations: + {{- toYaml .Values.secondary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.secondary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.secondary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.secondary.persistence "global" .Values.global) }} + {{- if .Values.secondary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/svc-headless.yaml b/lib/common/services/mysql/templates/secondary/svc-headless.yaml new file mode 100644 index 00000000..703d8e74 --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/svc-headless.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secondary/svc.yaml b/lib/common/services/mysql/templates/secondary/svc.yaml new file mode 100644 index 00000000..74a4c6ef --- /dev/null +++ b/lib/common/services/mysql/templates/secondary/svc.yaml @@ -0,0 +1,43 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.secondary.service.type }} + {{- if and (eq .Values.secondary.service.type "ClusterIP") .Values.secondary.service.clusterIP }} + clusterIP: {{ .Values.secondary.service.clusterIP }} + {{- end }} + {{- if and .Values.secondary.service.loadBalancerIP (eq .Values.secondary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.secondary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") .Values.secondary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.secondary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq 
.Values.secondary.service.type "NodePort") (eq .Values.secondary.service.type "LoadBalancer")) .Values.secondary.service.nodePort) }} + nodePort: {{ .Values.secondary.service.nodePort }} + {{- else if eq .Values.secondary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/lib/common/services/mysql/templates/secrets.yaml b/lib/common/services/mysql/templates/secrets.yaml index 888cd5e0..9412fc35 100644 --- a/lib/common/services/mysql/templates/secrets.yaml +++ b/lib/common/services/mysql/templates/secrets.yaml @@ -1,34 +1,21 @@ -{{- if (not .Values.existingSecret) -}} +{{- if eq (include "mysql.createSecret" .) "true" }} apiVersion: v1 kind: Secret metadata: - name: {{ template "mysql.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} type: Opaque data: - {{- if .Values.root.password }} - mysql-root-password: {{ .Values.root.password | b64enc | quote }} - {{- else if (not .Values.root.forcePassword) }} - mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }} - {{ else }} - mysql-root-password: {{ required "A MySQL Root Password is required!" 
.Values.root.password }} - {{- end }} - {{- if .Values.db.user }} - {{- if .Values.db.password }} - mysql-password: {{ .Values.db.password | b64enc | quote }} - {{- else if (not .Values.db.forcePassword) }} - mysql-password: {{ randAlphaNum 10 | b64enc | quote }} - {{- else }} - mysql-password: {{ required "A MySQL Database Password is required!" .Values.db.password }} - {{- end }} - {{- end }} - {{- if .Values.replication.enabled }} - {{- if .Values.replication.password }} - mysql-replication-password: {{ .Values.replication.password | b64enc | quote }} - {{- else if (not .Values.replication.forcePassword) }} - mysql-replication-password: {{ randAlphaNum 10 | b64enc | quote }} - {{- else }} - mysql-replication-password: {{ required "A MySQL Replication Password is required!" .Values.replication.password }} - {{- end }} + mysql-root-password: {{ include "mysql.root.password" . | b64enc | quote }} + mysql-password: {{ include "mysql.password" . | b64enc | quote }} + {{- if eq .Values.architecture "replication" }} + mysql-replication-password: {{ include "mysql.replication.password" . | b64enc | quote }} {{- end }} {{- end }} diff --git a/lib/common/services/mysql/templates/serviceaccount.yaml b/lib/common/services/mysql/templates/serviceaccount.yaml index ebde86c7..59eb1040 100644 --- a/lib/common/services/mysql/templates/serviceaccount.yaml +++ b/lib/common/services/mysql/templates/serviceaccount.yaml @@ -2,12 +2,21 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "mysql.serviceAccountName" . }} - labels: - app: {{ template "mysql.name" . }} - chart: {{ template "mysql.chart" . }} - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" + name: {{ include "mysql.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- if (not .Values.auth.customPasswordFiles) }} secrets: - name: {{ template "mysql.secretName" . }} - {{- end }} +{{- end }} +{{- end }} diff --git a/lib/common/services/mysql/templates/servicemonitor.yaml b/lib/common/services/mysql/templates/servicemonitor.yaml index 6dad49f0..f082dd54 100644 --- a/lib/common/services/mysql/templates/servicemonitor.yaml +++ b/lib/common/services/mysql/templates/servicemonitor.yaml @@ -2,18 +2,23 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "mysql.fullname" . }} + name: {{ include "common.names.fullname" . }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} {{- end }} - labels: {{- include "mysql.labels" . | nindent 4 }} - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: master endpoints: - port: metrics {{- if .Values.metrics.serviceMonitor.interval }} @@ -22,7 +27,16 @@ spec: {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} namespaceSelector: matchNames: - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics {{- end }} diff --git a/lib/common/services/mysql/templates/slave-configmap.yaml b/lib/common/services/mysql/templates/slave-configmap.yaml deleted file mode 100644 index fbaeb78c..00000000 --- a/lib/common/services/mysql/templates/slave-configmap.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if and .Values.replication.enabled .Values.slave.config }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . 
| nindent 4 }} - component: slave -data: - my.cnf: |- -{{ .Values.slave.config | indent 4 }} -{{- end }} diff --git a/lib/common/services/mysql/templates/slave-statefulset.yaml b/lib/common/services/mysql/templates/slave-statefulset.yaml deleted file mode 100644 index f8a78dd3..00000000 --- a/lib/common/services/mysql/templates/slave-statefulset.yaml +++ /dev/null @@ -1,262 +0,0 @@ -{{- if .Values.replication.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: slave -spec: - selector: - matchLabels: {{- include "mysql.matchLabels" . | nindent 6 }} - component: slave - serviceName: "{{ template "mysql.slave.fullname" . }}" - replicas: {{ .Values.slave.replicas }} - updateStrategy: - type: {{ .Values.slave.updateStrategy.type }} - {{- if (eq "Recreate" .Values.slave.updateStrategy.type) }} - rollingUpdate: null - {{- end }} - template: - metadata: - labels: {{- include "mysql.labels" . | nindent 8 }} - component: slave - {{- if .Values.slave.podAnnotations }} - annotations: {{ include "mysql.tplValue" ( dict "value" .Values.slave.podAnnotations "context" $) | nindent 8 }} - {{- end }} - spec: -{{- include "mysql.imagePullSecrets" . 
| indent 6 }} - {{- if .Values.slave.affinity }} - affinity: {{- include "mysql.tplValue" (dict "value" .Values.slave.affinity "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.nodeSelector }} - nodeSelector: {{- include "mysql.tplValue" (dict "value" .Values.slave.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.tolerations }} - tolerations: {{- include "mysql.tplValue" (dict "value" .Values.slave.tolerations "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.slave.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.slave.securityContext.fsGroup }} - runAsUser: {{ .Values.slave.securityContext.runAsUser }} - {{- end }} - serviceAccountName: {{ template "mysql.serviceAccountName" . }} - {{- if and .Values.volumePermissions.enabled .Values.slave.persistence.enabled }} - initContainers: - - name: volume-permissions - image: {{ template "mysql.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - {{- if .Values.slave.containerSecurityContext }} - securityContext: {{- toYaml .Values.slave.containerSecurityContext | nindent 12 }} - {{- end }} - command: - - /bin/bash - - -ec - - | - chown -R {{ .Values.slave.securityContext.runAsUser }}:{{ .Values.slave.securityContext.fsGroup }} {{ .Values.slave.persistence.mountPath }} - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.slave.persistence.mountPath }} - {{- end }} - containers: - - name: mysql - image: {{ template "mysql.image" . }} - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - env: - - name: MYSQL_REPLICATION_MODE - value: "slave" - - name: MYSQL_MASTER_HOST - value: {{ template "mysql.fullname" . 
}} - - name: MYSQL_MASTER_PORT_NUMBER - value: "3306" - - name: MYSQL_MASTER_ROOT_USER - value: "root" - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_MASTER_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_MASTER_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-root-password - {{- end }} - - name: MYSQL_REPLICATION_USER - value: "{{ .Values.replication.user }}" - {{- if .Values.replication.injectSecretsAsVolume }} - - name: MYSQL_REPLICATION_PASSWORD_FILE - value: "/opt/bitnami/mysql/secrets/mysql-replication-password" - {{- else }} - - name: MYSQL_REPLICATION_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . }} - key: mysql-replication-password - {{- end }} - {{- if .Values.slave.extraEnvVars }} - {{- include "mysql.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} - envFrom: - {{- if .Values.slave.extraEnvVarsCM }} - - configMapRef: - name: {{ .Values.slave.extraEnvVarsCM }} - {{- end }} - {{- if .Values.slave.extraEnvVarsSecret }} - - secretRef: - name: {{ .Values.slave.extraEnvVarsSecret }} - {{- end }} - {{- end }} - ports: - - name: mysql - containerPort: 3306 - {{- if .Values.slave.livenessProbe.enabled }} - livenessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} - failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.slave.readinessProbe.enabled }} - readinessProbe: - exec: - command: - - sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - mysqladmin status -uroot -p$password_aux - initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} - {{- end }} - {{- if .Values.master.resources }} - resources: {{- toYaml .Values.slave.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: {{ .Values.slave.persistence.mountPath }} - {{- if .Values.slave.config }} - - name: config - mountPath: /opt/bitnami/mysql/conf/my.cnf - subPath: my.cnf - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - mountPath: /opt/bitnami/mysql/secrets/ - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - image: {{ template "mysql.metrics.image" . }} - imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} - env: - {{- if .Values.root.injectSecretsAsVolume }} - - name: MYSQL_MASTER_ROOT_PASSWORD_FILE - value: "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" - {{- else }} - - name: MYSQL_MASTER_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "mysql.secretName" . 
}} - key: mysql-root-password - {{- end }} - command: - - /bin/sh - - -c - - | - password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" - if [ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]; then - password_aux=$(cat $MYSQL_MASTER_ROOT_PASSWORD_FILE) - fi - DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter - ports: - - name: metrics - containerPort: 9104 - livenessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 15 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /metrics - port: metrics - initialDelaySeconds: 5 - timeoutSeconds: 1 - {{- if .Values.metrics.resources }} - resources: {{- toYaml .Values.metrics.resources | nindent 12 }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume }} - volumeMounts: - - name: mysql-credentials - mountPath: /opt/bitnami/mysqld-exporter/secrets/ - {{- end }} - {{- end }} - volumes: - {{- if .Values.slave.config }} - - name: config - configMap: - name: {{ template "mysql.slave.fullname" . }} - {{- end }} - {{- if or .Values.root.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }} - - name: mysql-credentials - secret: - secretName: {{ template "mysql.fullname" . }} - items: - {{- if .Values.root.injectSecretsAsVolume }} - - key: mysql-root-password - path: mysql-root-password - {{- end }} - {{- if .Values.replication.injectSecretsAsVolume }} - - key: mysql-replication-password - path: mysql-replication-password - {{- end }} - {{- end }} -{{- if not .Values.slave.persistence.enabled }} - - name: "data" - emptyDir: {} -{{- else if and .Values.slave.persistence.enabled .Values.slave.persistence.existingClaim }} - - name: "data" - persistentVolumeClaim: - claimName: {{ .Values.slave.persistence.existingClaim }} -{{- else if and .Values.slave.persistence.enabled (not .Values.slave.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - labels: - app: {{ template "mysql.name" . 
}} - component: slave - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.slave.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.slave.persistence.size | quote }} - {{ include "mysql.slave.storageClass" . }} -{{- end }} -{{- end }} diff --git a/lib/common/services/mysql/templates/slave-svc.yaml b/lib/common/services/mysql/templates/slave-svc.yaml deleted file mode 100644 index 450b00b7..00000000 --- a/lib/common/services/mysql/templates/slave-svc.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.replication.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "mysql.slave.fullname" . }} - labels: {{- include "mysql.labels" . | nindent 4 }} - component: slave - {{- if or .Values.service.annotations .Values.metrics.service.annotations }} - annotations: - {{- if .Values.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.metrics.service.annotations }} - {{- include "mysql.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} - {{- if not (empty .Values.service.loadBalancerIP.slave) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP.slave }} - {{- end }} - {{- end }} - ports: - - name: mysql - port: {{ .Values.service.port }} - targetPort: mysql - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} - nodePort: {{ .Values.service.nodePort.slave }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.metrics.enabled }} - - name: metrics - port: {{ .Values.metrics.service.port }} - targetPort: metrics - 
{{- end }} - selector: {{- include "mysql.matchLabels" . | nindent 4 }} - component: slave -{{- end }} diff --git a/lib/common/services/mysql/values-production.yaml b/lib/common/services/mysql/values-production.yaml deleted file mode 100644 index 303d981b..00000000 --- a/lib/common/services/mysql/values-production.yaml +++ /dev/null @@ -1,304 +0,0 @@ -## Admin (root) credentials -## -root: - ## MySQL admin password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount admin password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## Custom user/db credentials -## -db: - ## MySQL username and password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run - ## Note that this user should be different from the MySQL replication user (replication.user) - ## - user: - password: - ## Database to create - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run - ## - name: my_database - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## Replication configuration -## -replication: - ## Enable replication. This enables the creation of replicas of MySQL. 
If false, only a - ## master deployment would be created - ## - enabled: true - ## - ## MySQL replication user - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## Note that this user should be different from the MySQL user (db.user) - ## - user: replicator - ## MySQL replication user password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: true - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: true - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." 
-# -## ConfigMap with scripts to be run at first boot -## Note: This will override initdbScripts -# initdbScriptsConfigMap: - -## Slave nodes parameters -## -slave: - ## Number of slave replicas - ## - replicas: 2 - - ## Configure MySQL slave with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for slave nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Node labels for pod assignment. Evaluated as a template. 
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL slave pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL slave containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL slave containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL slave containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on slave nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## MySQL prometheus metrics parameters -## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint -## -metrics: - enabled: true - ## Bitnami MySQL Prometheus exporter image - ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ - ## - image: - registry: docker.io - repository: bitnami/mysqld-exporter - tag: 0.12.1-debian-10-r127 - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. 
- ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## MySQL Prometheus exporter containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 0.5 - # memory: 256Mi - requests: {} - # cpu: 0.5 - # memory: 256Mi - - ## MySQL Prometheus exporter service parameters - ## - service: - type: ClusterIP - port: 9104 - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.port }}" - - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - enabled: false - ## Namespace in which Prometheus is running - ## - # namespace: monitoring - - ## Interval at which metrics should be scraped. 
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## - # selector: - # prometheus: my-prometheus diff --git a/lib/common/services/mysql/values.schema.json b/lib/common/services/mysql/values.schema.json new file mode 100644 index 00000000..8021a460 --- /dev/null +++ b/lib/common/services/mysql/values.schema.json @@ -0,0 +1,178 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MySQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "required": ["database", "username", "password"], + "properties": { + "rootPassword": { + "type": "string", + "title": "MySQL root password", + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "database": { + "type": "string", + "title": "MySQL custom database name" + }, + "username": { + "type": "string", + "title": "MySQL custom username" + }, + "password": { + "type": "string", + "title": "MySQL custom password" + }, + "replicationUser": { + "type": "string", + "title": "MySQL replication username" + }, + "replicationPassword": { + "type": "string", + "title": "MySQL replication password" + } + } + }, + "primary": { + "type": "object", + "title": "Primary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL primary Pod security context", + "properties": { + "enabled": { 
+ "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL primary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "primary/persistence/enabled" + } + } + } + } + } + }, + "secondary": { + "type": "object", + "title": "Secondary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL secondary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL secondary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + 
"default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "secondary/persistence/enabled" + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/lib/common/services/mysql/values.yaml b/lib/common/services/mysql/values.yaml index 7cc01845..3ff7a0e8 100644 --- a/lib/common/services/mysql/values.yaml +++ b/lib/common/services/mysql/values.yaml @@ -1,553 +1,900 @@ +## @section Global parameters ## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets [array] Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) ## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Cluster domain +## +clusterDomain: cluster.local +## @param commonAnnotations [object] Common annotations to add to all MySQL resources (sub-charts are not considered). 
Evaluated as a template +## +commonAnnotations: {} +## @param commonLabels [object] Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param extraDeploy [array] Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MySQL common parameters ## Bitnami MySQL image ## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## @param image.registry MySQL image registry +## @param image.repository MySQL image repository +## @param image.tag MySQL image tag (immutable tags are recommended) +## @param image.pullPolicy MySQL image pull policy +## @param image.pullSecrets [array] Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled ## image: registry: docker.io repository: bitnami/mysql - tag: 8.0.20-debian-10-r37 + tag: 8.0.29-debian-10-r2 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
+ ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName + pullSecrets: [] ## Set to true if you would like to see extra information on logs - ## It turns BASH debugging in minideb-extras-base + ## It turns BASH and/or NAMI debugging in the image ## debug: false - -## String to partially override mysql.fullname template (will maintain the release name) +## @param architecture MySQL architecture (`standalone` or `replication`) ## -# nameOverride: - -## String to fully override mysql.fullname template +architecture: standalone +## MySQL Authentication parameters ## -# fullnameOverride: - -## Cluster domain +auth: + ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## @param auth.database Name for a custom database to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-on-first-run + ## + database: my_database + ## @param auth.username Name for a custom user to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-user-on-first-run + ## + username: "" + ## @param auth.password Password for the new user. Ignored if existing secret is provided + ## + password: "" + ## @param auth.replicationUser MySQL replication user + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + replicationUser: replicator + ## @param auth.replicationPassword MySQL replication user password. 
Ignored if existing secret is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` + ## NOTE: When it's set the auth.rootPassword, auth.password, auth.replicationPassword are ignored. + ## + existingSecret: "" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## @param auth.customPasswordFiles [object] Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mysql-root + ## user: /vault/secrets/mysql-user + ## replicator: /vault/secrets/mysql-replicator + ## + customPasswordFiles: {} +## @param initdbScripts [object] Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." 
## -clusterDomain: cluster.local +initdbScripts: {} +## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsConfigMap: "" + +## @section MySQL Primary parameters + +primary: + ## @param primary.command [array] Override default container command on MySQL Primary container(s) (useful when using custom images) + ## + command: [] + ## @param primary.args [array] Override default container args on MySQL Primary container(s) (useful when using custom images) + ## + args: [] + ## @param primary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.configuration [string] Configure MySQL Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param primary.updateStrategy Update strategy type for the MySQL primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param primary.rollingUpdatePartition Partition update strategy for MySQL Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param primary.podAnnotations [object] Additional pod annotations for MySQL primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param primary.podAffinityPreset MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## MySQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key MySQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values [array] MySQL primary node label values to match. 
Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity [object] Affinity for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector [object] Node labels for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations [array] Tolerations for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param primary.podSecurityContext.enabled Enable security context for MySQL primary pods + ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param primary.containerSecurityContext.enabled MySQL primary container securityContext + ## @param primary.containerSecurityContext.runAsUser User ID for the MySQL primary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL primary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param primary.resources.limits [object] The resources limits for MySQL primary containers + ## @param primary.resources.requests [object] The requested resources for MySQL primary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.readinessProbe.enabled Enable readinessProbe + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for 
readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.startupProbe.enabled Enable startupProbe + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 10 + successThreshold: 1 + ## @param primary.customLivenessProbe [object] Override default liveness probe for MySQL primary containers + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe [object] Override default readiness probe for MySQL primary containers + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe [object] Override default startup probe for MySQL primary containers + ## + customStartupProbe: {} + ## @param primary.extraFlags MySQL primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param primary.extraEnvVars [array] Extra environment variables to be set on MySQL primary containers + ## E.g. 
+ ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL primary containers + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL primary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + existingClaim: "" + ## @param primary.persistence.storageClass MySQL primary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.annotations [object] MySQL primary persistent volume claim annotations + ## + annotations: {} + ## @param primary.persistence.accessModes MySQL primary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size MySQL primary persistent volume size + ## + size: 8Gi + ## @param primary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.initContainers [array] Add additional init containers for the MySQL Primary pod(s) + ## + initContainers: [] + ## @param primary.sidecars [array] Add additional sidecar containers for the MySQL Primary pod(s) + ## + sidecars: [] + ## MySQL Primary Service parameters + ## + service: + ## @param primary.service.type MySQL Primary K8s service type + ## + type: ClusterIP + ## @param primary.service.port MySQL Primary K8s service port + ## + port: 3306 + ## @param primary.service.nodePort MySQL Primary K8s service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param primary.service.clusterIP MySQL Primary K8s service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.loadBalancerIP MySQL Primary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param 
primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL Primary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param primary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL primary pods + ## + enabled: false + ## @param primary.pdb.minAvailable Minimum number/percentage of MySQL primary pods that should remain scheduled + ## + minAvailable: 1 + ## @param primary.pdb.maxUnavailable Maximum number/percentage of MySQL primary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param primary.podLabels [object] MySQL Primary pod label. 
If labels are same as commonLabels , this will take precedence + ## + podLabels: {} + +## @section MySQL Secondary parameters + +secondary: + ## @param secondary.replicaCount Number of MySQL secondary replicas + ## + replicaCount: 1 + ## @param secondary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param secondary.command [array] Override default container command on MySQL Secondary container(s) (useful when using custom images) + ## + command: [] + ## @param secondary.args [array] Override default container args on MySQL Secondary container(s) (useful when using custom images) + ## + args: [] + ## @param secondary.configuration [string] Configure MySQL Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param secondary.existingConfigmap Name of existing ConfigMap with MySQL Secondary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param secondary.updateStrategy Update strategy type for the MySQL secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param secondary.rollingUpdatePartition Partition update strategy for MySQL Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param secondary.podAnnotations [object] Additional pod annotations for MySQL secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param secondary.podAffinityPreset MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param secondary.podAntiAffinityPreset MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + ## MySQL Secondary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param secondary.nodeAffinityPreset.type MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param secondary.nodeAffinityPreset.key MySQL secondary node label key to match Ignored if `secondary.affinity` is set. + ## E.g. 
+ ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param secondary.nodeAffinityPreset.values [array] MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param secondary.affinity [object] Affinity for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param secondary.nodeSelector [object] Node labels for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param secondary.tolerations [array] Tolerations for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param secondary.podSecurityContext.enabled Enable security context for MySQL secondary pods + ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param secondary.containerSecurityContext.enabled MySQL secondary container securityContext + ## @param secondary.containerSecurityContext.runAsUser User ID for the MySQL secondary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL secondary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to 
leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param secondary.resources.limits [object] The resources limits for MySQL secondary containers + ## @param secondary.resources.requests [object] The requested resources for MySQL secondary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.livenessProbe.enabled Enable livenessProbe + ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.readinessProbe.enabled Enable readinessProbe + ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for 
readinessProbe + ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.startupProbe.enabled Enable startupProbe + ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param secondary.customLivenessProbe [object] Override default liveness probe for MySQL secondary containers + ## + customLivenessProbe: {} + ## @param secondary.customReadinessProbe [object] Override default readiness probe for MySQL secondary containers + ## + customReadinessProbe: {} + ## @param secondary.customStartupProbe [object] Override default startup probe for MySQL secondary containers + ## + customStartupProbe: {} + ## @param secondary.extraFlags MySQL secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param secondary.extraEnvVars [array] An array to add extra environment variables on MySQL secondary containers + ## E.g. 
+ ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL secondary containers + ## + extraEnvVarsCM: "" + ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL secondary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param secondary.persistence.enabled Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` + ## + enabled: true + ## @param secondary.persistence.storageClass MySQL secondary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param secondary.persistence.annotations [object] MySQL secondary persistent volume claim annotations + ## + annotations: {} + ## @param secondary.persistence.accessModes MySQL secondary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param secondary.persistence.size MySQL secondary persistent volume size + ## + size: 8Gi + ## @param secondary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param secondary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL secondary pod(s) + ## + extraVolumes: [] + ## @param secondary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) + ## + extraVolumeMounts: [] + ## @param secondary.initContainers [array] Add additional init containers for 
the MySQL secondary pod(s) + ## + initContainers: [] + ## @param secondary.sidecars [array] Add additional sidecar containers for the MySQL secondary pod(s) + ## + sidecars: [] + ## MySQL Secondary Service parameters + ## + service: + ## @param secondary.service.type MySQL secondary Kubernetes service type + ## + type: ClusterIP + ## @param secondary.service.port MySQL secondary Kubernetes service port + ## + port: 3306 + ## @param secondary.service.nodePort MySQL secondary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param secondary.service.clusterIP MySQL secondary Kubernetes service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param secondary.service.loadBalancerIP MySQL secondary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param secondary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL secondary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param secondary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param secondary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL secondary pods + ## + enabled: false + ## @param secondary.pdb.minAvailable Minimum number/percentage of MySQL secondary pods that should remain scheduled + ## + minAvailable: 1 + ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MySQL secondary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param secondary.podLabels [object] Additional pod labels for MySQL secondary pods + ## + podLabels: {} + +## @section RBAC parameters + +## MySQL pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MySQL pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the mysql.fullname template + ## + name: "" + ## @param serviceAccount.annotations [object] Annotations for MySQL Service Account + ## + annotations: {} +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## + create: false + +## @section Network Policy + +## MySQL Nework Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal The Policy model to apply. 
+ ## When set to false, only pods with the correct + ## client label will have network access to the port MySQL is listening + ## on. When true, MySQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector [object] A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## @section Volume Permissions parameters ## Init containers parameters: ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. ## volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Init container' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - -## Use existing secret (ignores root, db and replication passwords) -## -# existingSecret: - -## Admin (root) credentials -## -root: - ## MySQL admin password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: false - ## Mount admin password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## Custom user/db credentials -## -db: - ## MySQL username and password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run - ## Note that this user should be different from the MySQL replication user (replication.user) - ## - user: - password: - ## Database to create - ## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run - ## - name: my_database - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. 
- ## - forcePassword: false - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## Replication configuration -## -replication: - ## Enable replication. This enables the creation of replicas of MySQL. If false, only a - ## master deployment would be created - ## - enabled: true - ## - ## MySQL replication user - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## Note that this user should be different from the MySQL user (db.user) - ## - user: replicator - ## MySQL replication user password - ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster - ## - password: - ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. - ## If it is not force, a random password will be generated. - ## - forcePassword: false - ## Mount replication user password as a file instead of using an environment variable - ## - injectSecretsAsVolume: false - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." 
-# -## ConfigMap with scripts to be run at first boot -## Note: This will override initdbScripts -# initdbScriptsConfigMap: - -serviceAccount: - create: true - ## Specify the name of the service account created/used - # name: - -## Master nodes parameters -## -master: - ## Configure MySQL with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - plugin_dir=/opt/bitnami/mysql/plugin - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - plugin_dir=/opt/bitnami/mysql/plugin - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for master nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Node labels for pod assignment. 
Evaluated as a template. - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL master pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL master containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL master containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL master containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on master nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## Slave nodes parameters -## -slave: - ## Number of slave replicas - ## - replicas: 1 - - ## Configure MySQL slave with a custom my.cnf file - ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file - ## - config: |- - [mysqld] - default_authentication_plugin=mysql_native_password - skip-name-resolve - explicit_defaults_for_timestamp - basedir=/opt/bitnami/mysql - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - datadir=/bitnami/mysql/data - tmpdir=/opt/bitnami/mysql/tmp - max_allowed_packet=16M - bind-address=0.0.0.0 - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - log-error=/opt/bitnami/mysql/logs/mysqld.log - character-set-server=UTF8 - collation-server=utf8_general_ci - - [client] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - default-character-set=UTF8 - - [manager] - port=3306 - socket=/opt/bitnami/mysql/tmp/mysql.sock - pid-file=/opt/bitnami/mysql/tmp/mysqld.pid - - ## updateStrategy for slave nodes - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - - ## Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - - ## Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## An array to add extra environment variables - ## For example: - ## extraEnvVars: - ## - name: TZ - ## value: "Europe/Paris" - ## - extraEnvVars: - - ## ConfigMap with extra env vars: - ## - extraEnvVarsCM: - - ## Secret with extra env vars: - ## - extraEnvVarsSecret: - - ## Node 
labels for pod assignment. Evaluated as a template. - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## MySQL slave pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - - ## MySQL slave containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: - ## containerSecurityContext: - ## capabilities: - ## drop: ["NET_RAW"] - ## readOnlyRootFilesystem: true - ## - containerSecurityContext: {} - - ## MySQL slave containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - - ## MySQL slave containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - enabled: true - ## Initializing the database could take some time - ## - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - ## Enable persistence using PVCs on slave nodes - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## If true, use a Persistent Volume Claim, If false, use emptyDir - ## - enabled: true - mountPath: /bitnami/mysql - ## Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - ## PVC annotations - ## - annotations: {} - ## Persistent Volume Access Mode - ## - accessModes: - - ReadWriteOnce - ## Persistent Volume size - ## - size: 8Gi - ## Use an existing PVC - ## - # existingClaim: - -## MySQL Service properties -## -service: - ## MySQL Service type - ## - type: ClusterIP - - #name: {{ service_name }} - ## MySQL Service port - ## - port: 3306 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: - master: - slave: - - ## Provide any additional annotations which may be required. This can be used to - ## set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - ## loadBalancerIP for the MySQL Service (optional, cloud specific) - ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer - ## - # loadBalancerIP: - # master: - # slave: -## MySQL prometheus metrics parameters -## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint -## -metrics: - enabled: false - ## Bitnami MySQL Prometheus exporter image - ## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/ + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets [array] Specify docker-registry secret names as an array ## image: registry: docker.io - repository: bitnami/mysqld-exporter - tag: 0.12.1-debian-10-r127 + repository: bitnami/bitnami-shell + tag: 10-debian-10-r409 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - ## MySQL Prometheus exporter containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + pullSecrets: [] + ## @param volumePermissions.resources [object] Init container volume-permissions resources ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. 
This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 0.5 - # memory: 256Mi - requests: {} - # cpu: 0.5 - # memory: 256Mi + resources: {} +## @section Metrics parameters + +## Mysqld Prometheus exporter parameters +## +metrics: + ## @param metrics.enabled Start a side-car prometheus exporter + ## + enabled: false + ## @param metrics.image.registry Exporter image registry + ## @param metrics.image.repository Exporter image repository + ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Exporter image pull policy + ## @param metrics.image.pullSecrets [array] Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.14.0-debian-10-r53 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] ## MySQL Prometheus exporter service parameters + ## Mysqld Prometheus exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.service.type Kubernetes service type for MySQL Prometheus Exporter + ## @param metrics.service.port MySQL Prometheus Exporter service port + ## @param metrics.service.annotations [object] Prometheus exporter service annotations ## service: type: ClusterIP @@ -555,27 +902,125 @@ metrics: annotations: prometheus.io/scrape: "true" prometheus.io/port: "{{ .Values.metrics.service.port }}" - - ## Prometheus Operator ServiceMonitor configuration + ## @param metrics.extraArgs.primary [array] Extra args to be passed to mysqld_exporter on Primary pods + ## @param metrics.extraArgs.secondary [array] Extra args to be passed to mysqld_exporter on Secondary pods + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. 
+ ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + ## Mysqld Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits [object] The resources limits for MySQL prometheus exporter containers + ## @param metrics.resources.requests [object] The requested resources for MySQL prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Mysqld Prometheus exporter liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Mysqld Prometheus exporter readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Prometheus Service 
Monitor + ## ref: https://github.com/coreos/prometheus-operator ## serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## enabled: false - ## Namespace in which Prometheus is running + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created ## - # namespace: monitoring - - ## Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings [array] Specify Metric Relabellings to add to the scrape endpoint ## - # selector: - # prometheus: my-prometheus + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels [object] Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/lib/helm-freeze.yaml b/lib/helm-freeze.yaml index 37e35087..c9b70c2a 100644 --- a/lib/helm-freeze.yaml +++ b/lib/helm-freeze.yaml @@ -1,6 +1,6 @@ charts: - name: 
cert-manager - version: v1.1.1 + version: v1.2.0 repo_name: jetstack - name: external-dns repo_name: bitnami @@ -21,15 +21,15 @@ charts: - name: aws-node-termination-handler repo_name: aws dest: aws-bootstrap - version: 0.8.0 + version: 0.16.1 - name: aws-vpc-cni repo_name: aws dest: aws-bootstrap - version: 1.1.3 + version: 1.1.13 - name: aws-calico repo_name: aws dest: aws-bootstrap - version: 0.3.1 + version: 0.3.10 - name: cluster-autoscaler repo_name: cluster-autoscaler version: 9.10.4 @@ -55,9 +55,8 @@ charts: # Chart is frozen due to custom modification see https://github.com/Qovery/engine/pull/293 - name: mysql repo_name: bitnami - version: 6.14.2 + version: 8.9.6 dest: services - no_sync: true - name: postgresql repo_name: bitnami version: 8.9.8 diff --git a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs index 78469eb7..77e73e83 100644 --- a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs +++ b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs @@ -205,6 +205,7 @@ pub fn eks_aws_helm_charts( let aws_node_term_handler = CommonChart { chart_info: ChartInfo { name: "aws-node-term-handler".to_string(), + last_breaking_version_requiring_restart: Some(Version::new(0, 16, 1)), path: chart_path("charts/aws-node-termination-handler"), values: vec![ ChartSetValue { @@ -249,6 +250,16 @@ pub fn eks_aws_helm_charts( chart_info: ChartInfo { name: "calico".to_string(), path: chart_path("charts/aws-calico"), + values: vec![ + ChartSetValue { + key: "calico.node.resources.limits.memory".to_string(), + value: "128Mi".to_string(), + }, + ChartSetValue { + key: "calico.node.resources.requests.memory".to_string(), + value: "128Mi".to_string(), + }, + ], ..Default::default() }, }; @@ -784,6 +795,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), 
"issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs index f6db1401..3e9f733e 100644 --- a/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs +++ b/src/cloud_provider/digitalocean/kubernetes/helm_charts.rs @@ -611,6 +611,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), "issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs index 8e252bff..5f8e1702 100644 --- a/src/cloud_provider/helm.rs +++ b/src/cloud_provider/helm.rs @@ -2,6 +2,9 @@ use crate::cloud_provider::helm::HelmAction::Deploy; use crate::cloud_provider::helm::HelmChartNamespaces::KubeSystem; use crate::cloud_provider::qovery::{get_qovery_app_version, EngineLocation, QoveryAppName, QoveryShellAgent}; use crate::cmd::helm::{to_command_error, Helm}; +use crate::cmd::helm_utils::{ + apply_chart_backup, delete_unused_chart_backup, prepare_chart_backup_on_upgrade, BackupStatus, +}; use crate::cmd::kubectl::{ kubectl_delete_crash_looping_pods, kubectl_exec_delete_crd, kubectl_exec_get_configmap, kubectl_exec_get_events, kubectl_exec_rollout_restart_deployment, kubectl_exec_with_output, @@ -12,7 +15,7 @@ use crate::utilities::calculate_hash; use semver::Version; use std::collections::HashMap; use std::fmt::{Display, Formatter}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::{fs, thread}; use thread::spawn; use tracing::{span, Level}; @@ -82,6 +85,7 @@ pub struct ChartInfo { pub yaml_files_content: Vec, pub parse_stderr_for_error: bool, pub k8s_selector: Option, + pub backup_resources: Option>, } 
impl ChartInfo { @@ -146,6 +150,7 @@ impl Default for ChartInfo { yaml_files_content: vec![], parse_stderr_for_error: true, k8s_selector: None, + backup_resources: None, } } } @@ -232,7 +237,61 @@ pub trait HelmChart: Send { ); } - helm.upgrade(chart_info, &[]).map_err(to_command_error)?; + let installed_version = match helm.get_chart_version( + chart_info.name.clone(), + Some(chart_info.get_namespace_string().as_str()), + environment_variables.as_slice(), + ) { + Ok(version) => version, + Err(e) => { + warn!("error while trying to get installed version: {:?}", e); + None + } + }; + + let upgrade_status = match prepare_chart_backup_on_upgrade( + kubernetes_config, + chart_info.clone(), + environment_variables.as_slice(), + installed_version, + ) { + Ok(status) => status, + Err(e) => { + warn!("error while trying to prepare backup: {:?}", e); + BackupStatus { + is_backupable: false, + backup_path: PathBuf::new(), + } + } + }; + + match helm.upgrade(chart_info, &[]).map_err(to_command_error) { + Ok(_) => { + if upgrade_status.is_backupable { + if let Err(e) = apply_chart_backup( + kubernetes_config, + upgrade_status.backup_path.as_path(), + environment_variables.as_slice(), + chart_info, + ) { + warn!("error while trying to apply backup: {:?}", e); + }; + } + } + Err(e) => { + if upgrade_status.is_backupable { + if let Err(e) = delete_unused_chart_backup( + kubernetes_config, + environment_variables.as_slice(), + chart_info, + ) { + warn!("error while trying to delete backup: {:?}", e); + } + } + + return Err(e); + } + } } HelmAction::Destroy => { let chart_info = self.get_chart_info(); @@ -358,7 +417,7 @@ pub fn deploy_charts_levels( // Common charts // -#[derive(Default)] +#[derive(Default, Clone)] pub struct CommonChart { pub chart_info: ChartInfo, } diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs index 75682e0a..c35e17b4 100644 --- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs 
+++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs @@ -560,6 +560,7 @@ datasources: name: "cert-manager-configs".to_string(), path: chart_path("common/charts/cert-manager-configs"), namespace: HelmChartNamespaces::CertManager, + backup_resources: Some(vec!["cert".to_string(), "issuer".to_string(), "clusterissuer".to_string()]), values: vec![ ChartSetValue { key: "externalDnsProvider".to_string(), diff --git a/src/cmd/helm.rs b/src/cmd/helm.rs index bb0aca91..b63e5486 100644 --- a/src/cmd/helm.rs +++ b/src/cmd/helm.rs @@ -504,40 +504,38 @@ impl Helm { }, ); - match helm_ret { - // Ok is ok - Ok(_) => Ok(()), - Err(err) => { - error!("Helm error: {:?}", err); + if let Err(err) = helm_ret { + error!("Helm error: {:?}", err); - // Try do define/specify a bit more the message - let stderr_msg: String = error_message.into_iter().collect(); - let stderr_msg = format!( - "{}: {}", - stderr_msg, - err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) - ); - let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { - HelmError::ReleaseLocked(chart.name.clone()) - } else if stderr_msg.contains("has been rolled back") { - HelmError::Rollbacked(chart.name.clone(), UPGRADE) - } else if stderr_msg.contains("timed out waiting") { - HelmError::Timeout(chart.name.clone(), UPGRADE, stderr_msg) - } else { - CmdError( - chart.name.clone(), - HelmCommand::UPGRADE, - CommandError::new( - "Helm error".to_string(), - Some(stderr_msg), - Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), - ), - ) - }; + // Try do define/specify a bit more the message + let stderr_msg: String = error_message.into_iter().collect(); + let stderr_msg = format!( + "{}: {}", + stderr_msg, + err.message(ErrorMessageVerbosity::FullDetailsWithoutEnvVars) + ); + let error = if stderr_msg.contains("another operation (install/upgrade/rollback) is in progress") { + HelmError::ReleaseLocked(chart.name.clone()) + } else if 
stderr_msg.contains("has been rolled back") { + HelmError::Rollbacked(chart.name.clone(), UPGRADE) + } else if stderr_msg.contains("timed out waiting") { + HelmError::Timeout(chart.name.clone(), UPGRADE, stderr_msg) + } else { + CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + "Helm error".to_string(), + Some(stderr_msg), + Some(envs.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect()), + ), + ) + }; - Err(error) - } - } + return Err(error); + }; + + Ok(()) } pub fn uninstall_chart_if_breaking_version( @@ -612,17 +610,19 @@ mod tests { struct HelmTestCtx { helm: Helm, - chart: ChartInfo, + charts: Vec, } impl HelmTestCtx { fn cleanup(&self) { - let ret = self.helm.uninstall(&self.chart, &[]); - assert!(ret.is_ok()) + for chart in &self.charts { + let ret = self.helm.uninstall(chart, &vec![]); + assert!(ret.is_ok()) + } } fn new(release_name: &str) -> HelmTestCtx { - let chart = ChartInfo::new_from_custom_namespace( + let charts = vec![ChartInfo::new_from_custom_namespace( release_name.to_string(), "tests/helm/simple_nginx".to_string(), "default".to_string(), @@ -630,12 +630,12 @@ mod tests { vec![], false, None, - ); + )]; let mut kube_config = dirs::home_dir().unwrap(); kube_config.push(".kube/config"); - let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); + let helm = Helm::new(kube_config.to_str().unwrap(), &vec![]).unwrap(); - let cleanup = HelmTestCtx { helm, chart }; + let cleanup = HelmTestCtx { helm, charts }; cleanup.cleanup(); cleanup } @@ -656,26 +656,26 @@ mod tests { #[test] fn test_release_exist() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-release-exist"); - let ret = helm.check_release_exist(chart, &[]); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-release-exist"); + let ret = helm.check_release_exist(&charts[0], &vec![]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)) + assert!(matches!(ret, 
Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)) } #[test] fn test_list_release() { let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-list-release"); - chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); + charts[0].custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); // no existing namespace should return an empty array let ret = helm.list_release(Some("tsdfsfsdf"), &[]); assert!(matches!(ret, Ok(vec) if vec.is_empty())); // install something - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // We should have at least one release in all the release @@ -683,69 +683,69 @@ mod tests { assert!(matches!(ret, Ok(vec) if !vec.is_empty())); // We should have at least one release in all the release - let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); + let ret = helm.list_release(Some(&charts[0].get_namespace_string()), &vec![]); assert!(matches!(ret, Ok(vec) if vec.len() == 1)); // Install a second stuff let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-list-release-2"); - chart.custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); - let ret = helm.upgrade(chart, &[]); + charts[0].custom_namespace = Some("hello-my-friend-this-is-a-test".to_string()); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); - let ret = helm.list_release(Some(&chart.get_namespace_string()), &[]); + let ret = helm.list_release(Some(&charts[0].get_namespace_string()), &vec![]); assert!(matches!(ret, Ok(vec) if vec.len() == 2)); } #[test] fn test_upgrade_diff() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-diff"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade-diff"); - let ret = helm.upgrade_diff(chart, &[]); + let ret = helm.upgrade_diff(&charts[0], &vec![]); 
assert!(matches!(ret, Ok(()))); } #[test] fn test_rollback() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-rollback"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-rollback"); // check release does not exist yet - let ret = helm.rollback(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.rollback(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // First revision cannot be rollback - let ret = helm.rollback(chart, &[]); + let ret = helm.rollback(&charts[0], &vec![]); assert!(matches!(ret, Err(HelmError::CannotRollback(_)))); // 2nd upgrade - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Rollback should be ok now - let ret = helm.rollback(chart, &[]); + let ret = helm.rollback(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); } #[test] fn test_upgrade() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(_))); } @@ -753,37 +753,37 @@ mod tests { fn test_upgrade_timeout() 
{ let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-upgrade-timeout"); - chart.timeout_in_seconds = 1; + charts[0].timeout_in_seconds = 1; // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(HelmError::Timeout(_, _, _)))); // Release should not exist if it fails - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); } #[test] fn test_upgrade_with_lock_during_install() { // We want to check that we manage to install a chart even if a lock is present while it was the first installation - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-upgrade-with-lock-install"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-upgrade-with-lock-install"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // Spawn our task killer let barrier = Arc::new(Barrier::new(2)); std::thread::spawn({ let barrier = barrier.clone(); - let chart_name = chart.name.clone(); + let chart_name = charts[0].name.clone(); move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); @@ -794,19 +794,19 @@ mod tests 
{ // install it barrier.wait(); - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if release.is_locked())); // New installation should work even if a lock is present - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if !release.is_locked())); } @@ -815,22 +815,22 @@ mod tests { // We want to check that we manage to install a chart even if a lock is present while it not the first installation let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-upgrade-with-lock-upgrade"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // First install - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Spawn our task killer let barrier = Arc::new(Barrier::new(2)); std::thread::spawn({ let barrier = barrier.clone(); - let chart_name = chart.name.clone(); + let chart_name = charts[0].name.clone(); move || { barrier.wait(); thread::sleep(Duration::from_millis(3000)); @@ -839,64 +839,64 @@ mod tests { } }); - chart.values = vec![ChartSetValue { + charts[0].values = vec![ChartSetValue { key: "initialDelaySeconds".to_string(), value: "6".to_string(), }]; barrier.wait(); - let ret = helm.upgrade(chart, &[]); + let ret = 
helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Err(_))); // Release should be locked - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if release.is_locked() && release.version == 2)); // New installation should work even if a lock is present - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // Release should not be locked anymore - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(release) if !release.is_locked() && release.version == 4)); } #[test] fn test_uninstall() { - let HelmTestCtx { ref helm, ref chart } = HelmTestCtx::new("test-uninstall"); + let HelmTestCtx { ref helm, ref charts } = HelmTestCtx::new("test-uninstall"); // check release does not exist yet - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); // deleting something that does not exist should not be an issue - let ret = helm.uninstall(chart, &[]); + let ret = helm.uninstall(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // install it - let ret = helm.upgrade(chart, &[]); + let ret = helm.upgrade(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check now it exists - let ret = helm.check_release_exist(chart, &[]); + let ret = helm.check_release_exist(&charts[0], &vec![]); assert!(matches!(ret, Ok(_))); // Delete it - let ret = helm.uninstall(chart, &[]); + let ret = helm.uninstall(&charts[0], &vec![]); assert!(matches!(ret, Ok(()))); // check release does not exist anymore - let ret = helm.check_release_exist(chart, &[]); - assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test 
== chart.name)); + let ret = helm.check_release_exist(&charts[0], &vec![]); + assert!(matches!(ret, Err(HelmError::ReleaseDoesNotExist(test)) if test == charts[0].name)); } #[test] fn test_getting_version() { let HelmTestCtx { ref helm, - ref mut chart, + ref mut charts, } = HelmTestCtx::new("test-version-release"); - let _ = helm.upgrade(chart, &[]); - let releases = helm.list_release(Some(&chart.get_namespace_string()), &[]).unwrap(); + let _ = helm.upgrade(&charts[0], &[]); + let releases = helm.list_release(Some(&charts[0].get_namespace_string()), &[]).unwrap(); assert_eq!(releases[0].clone().version.unwrap(), Version::new(0, 1, 0)) } } diff --git a/src/cmd/helm_utils.rs b/src/cmd/helm_utils.rs new file mode 100644 index 00000000..2d894b0f --- /dev/null +++ b/src/cmd/helm_utils.rs @@ -0,0 +1,411 @@ +use crate::cloud_provider::helm::ChartInfo; +use crate::cmd::helm::HelmError::CmdError; +use crate::cmd::helm::{HelmCommand, HelmError}; +use crate::cmd::kubectl::{ + kubectl_apply_with_path, kubectl_create_secret_from_file, kubectl_delete_secret, kubectl_exec_get_secrets, + kubectl_get_resource_yaml, +}; +use crate::errors::CommandError; +use crate::fs::{ + create_yaml_backup_file, create_yaml_file_from_secret, indent_file, remove_lines_starting_with, + truncate_file_from_word, +}; +use semver::Version; +use serde_derive::Deserialize; +use std::fs::OpenOptions; +use std::io::{BufReader, Read}; +use std::path::{Path, PathBuf}; + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct Backup { + pub name: String, + pub content: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct BackupInfos { + pub name: String, + pub path: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ChartYAML { + #[serde(default)] + pub version: String, + #[serde(default)] + pub app_version: String, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct BackupStatus { + pub is_backupable: bool, + pub 
backup_path: PathBuf, +} + +pub fn prepare_chart_backup( + kubernetes_config: P, + workspace_root_dir: T, + chart: &ChartInfo, + envs: &[(&str, &str)], + backup_resources: Vec, +) -> Result, HelmError> +where + P: AsRef, + T: AsRef, +{ + let mut backups: Vec = vec![]; + for backup_resource in backup_resources { + match kubectl_get_resource_yaml( + &kubernetes_config, + envs.to_vec(), + backup_resource.as_str(), + Some(chart.get_namespace_string().as_str()), + ) { + Ok(content) => { + if !content.to_lowercase().contains("no resources found") { + backups.push(Backup { + name: backup_resource, + content, + }); + }; + } + Err(e) => { + error!("Kubectl error: {:?}", e.message_safe()) + } + }; + } + + let mut backup_infos: Vec = vec![]; + + if backups.is_empty() { + return Ok(backup_infos); + } + + for backup in backups.clone() { + if !backup.content.is_empty() && !backup.content.contains("items: []") { + match create_yaml_backup_file( + workspace_root_dir.as_ref(), + chart.name.to_string(), + Some(backup.name.clone()), + backup.content, + ) { + Ok(path) => { + backup_infos.push(BackupInfos { + name: backup.name, + path, + }); + } + Err(e) => { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while creating YAML backup file for {}.", backup.name), + Some(e.to_string()), + None, + ), + )) + } + } + } + } + + for backup_info in backup_infos.clone() { + if let Err(e) = remove_lines_starting_with( + backup_info.path.clone(), + vec!["resourceVersion", "uid", "apiVersion: v1", "items", "kind: List"], + ) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + if let Err(e) = truncate_file_from_word(backup_info.path.clone(), "metadata") { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML 
backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + if let Err(e) = indent_file(backup_info.path.clone()) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new( + format!("Error while editing YAML backup file {}.", backup_info.name), + Some(e.to_string()), + None, + ), + )); + } + + let backup_name = format!("{}-{}-q-backup", chart.name, backup_info.name); + if let Err(e) = kubectl_create_secret_from_file( + &kubernetes_config, + envs.to_vec(), + Some(chart.namespace.to_string().as_str()), + backup_name, + backup_info.name, + backup_info.path, + ) { + return Err(CmdError( + chart.name.clone(), + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + } + } + + Ok(backup_infos) +} + +pub fn apply_chart_backup

( + kubernetes_config: P, + workspace_root_dir: P, + envs: &[(&str, &str)], + chart: &ChartInfo, +) -> Result<(), HelmError> +where + P: AsRef, +{ + let secrets = kubectl_exec_get_secrets( + &kubernetes_config, + chart.clone().namespace.to_string().as_str(), + "", + envs.to_vec(), + ) + .map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + ) + })? + .items; + + for secret in secrets { + if secret.metadata.name.contains("-q-backup") { + let path = match create_yaml_file_from_secret(&workspace_root_dir, secret.clone()) { + Ok(path) => path, + Err(e) => match e.message_safe().to_lowercase().contains("no content") { + true => match kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + Ok(_) => continue, + Err(e) => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )) + } + }, + false => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )) + } + }, + }; + + if let Err(e) = kubectl_apply_with_path(&kubernetes_config, envs.to_vec(), path.as_str()) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + + if let Err(e) = kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + } + } + + Ok(()) +} + +pub fn delete_unused_chart_backup

( + kubernetes_config: P, + envs: &[(&str, &str)], + chart: &ChartInfo, +) -> Result<(), HelmError> +where + P: AsRef, +{ + let secrets = kubectl_exec_get_secrets( + &kubernetes_config, + chart.clone().namespace.to_string().as_str(), + "", + envs.to_vec(), + ) + .map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + ) + })? + .items; + + for secret in secrets { + if secret.metadata.name.contains("-q-backup") { + if let Err(e) = kubectl_delete_secret( + &kubernetes_config, + envs.to_vec(), + Some(chart.clone().namespace.to_string().as_str()), + secret.metadata.name, + ) { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new(e.message_safe(), e.message_raw(), None), + )); + }; + } + } + + Ok(()) +} + +pub fn get_common_helm_chart_infos(chart: &ChartInfo) -> Result { + let string_path = format!("{}/Chart.yaml", chart.path); + let file = OpenOptions::new().read(true).open(string_path.as_str()).map_err(|e| { + CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart infos for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + ) + })?; + let mut content = String::new(); + let _ = BufReader::new(file).read_to_string(&mut content); + match serde_yaml::from_str::(content.as_str()) { + Ok(chart_yaml) => Ok(chart_yaml), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart infos for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + } +} + +pub fn get_common_helm_chart_version(chart: &ChartInfo) -> Result, HelmError> { + let chart_yaml = match get_common_helm_chart_infos(chart) { + Ok(chart_yaml) => chart_yaml, + Err(e) => { + return Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + 
None, + ), + )) + } + }; + + if !chart_yaml.app_version.is_empty() { + return match Version::parse(chart_yaml.app_version.as_str()) { + Ok(version) => Ok(Some(version)), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + }; + } + + if !chart_yaml.version.is_empty() { + return match Version::parse(chart_yaml.version.as_str()) { + Ok(version) => Ok(Some(version)), + Err(e) => Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new( + format!("Unable to get chart version for {}.", chart.name.clone()), + Some(e.to_string()), + None, + ), + )), + }; + } + + Err(CmdError( + chart.clone().name, + HelmCommand::UPGRADE, + CommandError::new_from_safe_message(format!("Unable to get chart version for {}.", chart.name.clone())), + )) +} + +pub fn prepare_chart_backup_on_upgrade

( + kubernetes_config: P, + chart: ChartInfo, + envs: &[(&str, &str)], + installed_version: Option, +) -> Result +where + P: AsRef, +{ + let mut need_backup = false; + let root_dir_path = std::env::temp_dir(); + + if chart.backup_resources.is_some() { + if installed_version.le(&get_common_helm_chart_version(&chart)?) { + if let Err(e) = prepare_chart_backup( + kubernetes_config, + root_dir_path.as_path(), + &chart, + envs, + chart.backup_resources.as_ref().unwrap().to_vec(), + ) { + return Err(e); + }; + + need_backup = true; + } + } + + Ok(BackupStatus { + is_backupable: need_backup, + backup_path: root_dir_path, + }) +} diff --git a/src/cmd/kubectl.rs b/src/cmd/kubectl.rs index 339a8261..8f9a8363 100644 --- a/src/cmd/kubectl.rs +++ b/src/cmd/kubectl.rs @@ -1,3 +1,6 @@ +use std::fmt::Debug; +use std::fs::File; +use std::io::Read; use std::path::Path; use retry::delay::Fibonacci; @@ -1204,6 +1207,30 @@ where Ok(result) } +fn kubectl_exec_raw_output

( + args: Vec<&str>, + kubernetes_config: P, + envs: Vec<(&str, &str)>, + keep_format: bool, +) -> Result +where + P: AsRef, +{ + let mut _envs = Vec::with_capacity(envs.len() + 1); + _envs.push((KUBECONFIG, kubernetes_config.as_ref().to_str().unwrap())); + _envs.extend(envs); + + let mut output_vec: Vec = Vec::with_capacity(50); + let _ = kubectl_exec_with_output(args.clone(), _envs.clone(), &mut |line| output_vec.push(line), &mut |line| { + error!("{}", line) + })?; + + match keep_format { + true => Ok(output_vec.join("\n")), + false => Ok(output_vec.join("")), + } +} + pub fn kubernetes_get_all_pdbs

( kubernetes_config: P, envs: Vec<(&str, &str)>, @@ -1245,3 +1272,98 @@ where kubectl_exec::(cmd_args, kubernetes_config, envs) } + +pub fn kubectl_get_resource_yaml

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + resource: &str, + namespace: Option<&str>, +) -> Result +where + P: AsRef, +{ + let mut cmd_args = vec!["get", resource, "-oyaml"]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, true) +} + +pub fn kubectl_apply_with_path

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + file_path: &str, +) -> Result +where + P: AsRef, +{ + kubectl_exec_raw_output::

(vec!["apply", "-f", file_path], kubernetes_config, envs, false) +} + +pub fn kubectl_create_secret

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + secret_name: String, + key: String, + value: String, +) -> Result +where + P: AsRef, +{ + let secret_arg = format!("--from-literal={}=\"{}\"", key, value); + let mut cmd_args = vec!["create", "secret", "generic", secret_name.as_str(), secret_arg.as_str()]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, false) +} + +pub fn kubectl_delete_secret

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + secret_name: String, +) -> Result +where + P: AsRef, +{ + let mut cmd_args = vec!["delete", "secret", secret_name.as_str()]; + match namespace { + Some(n) => { + cmd_args.push("-n"); + cmd_args.push(n); + } + None => cmd_args.push("--all-namespaces"), + } + + kubectl_exec_raw_output(cmd_args, kubernetes_config, envs, false) +} + +pub fn kubectl_create_secret_from_file

( + kubernetes_config: P, + envs: Vec<(&str, &str)>, + namespace: Option<&str>, + backup_name: String, + key: String, + file_path: String, +) -> Result +where + P: AsRef, +{ + let mut file = File::open(file_path.as_str()).unwrap(); + let mut content = String::new(); + let _ = file.read_to_string(&mut content); + + kubectl_create_secret(kubernetes_config, envs, namespace, backup_name, key, content) +} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index 153aab46..d7ff53ec 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -1,6 +1,7 @@ pub mod command; pub mod docker; pub mod helm; +pub mod helm_utils; pub mod kubectl; pub mod structs; pub mod terraform; diff --git a/src/cmd/structs.rs b/src/cmd/structs.rs index 238024eb..0e6135a8 100644 --- a/src/cmd/structs.rs +++ b/src/cmd/structs.rs @@ -42,6 +42,7 @@ pub struct SecretItem { pub api_version: String, pub kind: String, pub metadata: SecretMetadata, + pub data: HashMap, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] diff --git a/src/fs.rs b/src/fs.rs index 7a68a48f..e35deca8 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -1,11 +1,16 @@ use std::collections::HashSet; use std::fs; -use std::fs::{create_dir_all, File}; -use std::io::{Error, ErrorKind}; +use std::fs::{create_dir_all, File, OpenOptions}; +use std::io::{BufRead, BufReader, Error, ErrorKind, Write}; use std::path::Path; +use crate::cmd::structs::SecretItem; +use crate::errors::CommandError; +use base64::decode; use flate2::write::GzEncoder; use flate2::Compression; +use itertools::Itertools; +use serde::__private::from_utf8_lossy; use std::ffi::OsStr; use walkdir::WalkDir; @@ -151,6 +156,232 @@ pub fn create_workspace_archive(working_root_dir: &str, execution_id: &str) -> R } } +pub fn create_yaml_backup_file

( + working_root_dir: P, + chart_name: String, + resource_name: Option, + content: String, +) -> Result +where + P: AsRef, +{ + let dir = working_root_dir.as_ref().join("backups"); + + if let Err(e) = create_dir_all(&dir) { + return Err(CommandError::new( + "Unable to create root dir path.".to_string(), + Some(e.to_string()), + None, + )); + } + + let root_path = dir + .to_str() + .map(|e| e.to_string()) + .ok_or_else(|| CommandError::new_from_safe_message("Unable to get backups root dir path.".to_string())); + + let string_path = match resource_name.is_some() { + true => format!( + "{}/{}-{}-q-backup.yaml", + root_path?, + chart_name, + resource_name.as_ref().unwrap() + ), + false => format!("{}/{}.yaml", root_path?, chart_name), + }; + let str_path = string_path.as_str(); + let path = Path::new(str_path); + + let mut file = match File::create(&path) { + Err(e) => { + return Err(CommandError::new( + format!("Unable to create YAML backup file for chart {}.", chart_name), + Some(e.to_string()), + None, + )) + } + Ok(file) => file, + }; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file for chart {}.", chart_name), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path.to_str().map(|e| e.to_string()).ok_or_else(|| { + CommandError::new_from_safe_message(format!( + "Unable to get YAML backup file path for chart {}.", + chart_name + )) + })?), + } +} + +pub fn remove_lines_starting_with(path: String, starters: Vec<&str>) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let mut content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + for starter in starters { + content = content + .into_iter() + .filter(|line| !line.contains(starter)) + .collect::>() + } + + let mut file = OpenOptions::new() + 
.write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.join("\n").as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn truncate_file_from_word(path: String, truncate_from: &str) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let content_vec = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + let truncate_from_index = match content_vec.iter().rposition(|line| line.contains(truncate_from)) { + None => content_vec.len(), + Some(index) => index, + }; + + let content = Vec::from(&content_vec[..truncate_from_index]).join("\n"); + + let mut file = OpenOptions::new() + .write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn indent_file(path: String) -> Result { + let file = OpenOptions::new().read(true).open(path.as_str()).map_err(|e| { + CommandError::new(format!("Unable to open YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + let file_content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>(); + + let content = file_content.iter().map(|line| line[2..].to_string()).join("\n"); + + let mut file = OpenOptions::new() + .write(true) + .truncate(true) + .open(path.as_str()) + .map_err(|e| { + 
CommandError::new(format!("Unable to edit YAML backup file {}.", path), Some(e.to_string()), None) + })?; + + match file.write(content.as_bytes()) { + Err(e) => Err(CommandError::new( + format!("Unable to edit YAML backup file {}.", path), + Some(e.to_string()), + None, + )), + Ok(_) => Ok(path), + } +} + +pub fn list_yaml_backup_files

(working_root_dir: P) -> Result, CommandError> +where + P: AsRef, +{ + let files = WalkDir::new(working_root_dir) + .follow_links(true) + .into_iter() + .filter_map(|e| e.ok()); + let mut backup_paths: Vec = vec![]; + for file in files { + if file + .file_name() + .to_str() + .ok_or_else(|| { + CommandError::new_from_safe_message(format!("Unable to get YAML backup file name {:?}.", file)) + })? + .to_string() + .contains("-q-backup.yaml") + { + backup_paths.push( + file.path() + .to_str() + .ok_or_else(|| { + CommandError::new_from_safe_message(format!("Unable to get YAML backup file name {:?}.", file)) + })? + .to_string(), + ) + } + } + + if backup_paths.is_empty() { + return Err(CommandError::new_from_safe_message( + "Unable to get YAML backup files".to_string(), + )); + } + + Ok(backup_paths) +} + +pub fn create_yaml_file_from_secret

(working_root_dir: P, secret: SecretItem) -> Result +where + P: AsRef, +{ + let message = format!("Unable to decode secret {}", secret.metadata.name); + let secret_data = secret.data.values().next(); + let secret_content = match secret_data.is_some() { + true => secret_data.unwrap().to_string(), + false => return Err(CommandError::new_from_safe_message(message)), + }; + + let content = match decode(secret_content) { + Ok(bytes) => from_utf8_lossy(&bytes[1..bytes.len() - 1]).to_string(), + Err(e) => return Err(CommandError::new(message, Some(e.to_string()), None)), + }; + match create_yaml_backup_file(working_root_dir.as_ref(), secret.metadata.name.clone(), None, content) { + Ok(path) => Ok(path), + Err(e) => Err(CommandError::new( + format!("Unable to create backup file from secret {}", secret.metadata.name), + Some(e.to_string()), + None, + )), + } +} + #[cfg(test)] mod tests { extern crate tempdir; @@ -159,7 +390,6 @@ mod tests { use flate2::read::GzDecoder; use std::collections::HashSet; use std::fs::File; - use std::io::prelude::*; use std::io::BufReader; use tempdir::TempDir; @@ -252,4 +482,102 @@ mod tests { tmp_files.into_iter().for_each(drop); tmp_dir.close().expect("error closing temporary directory"); } + + #[test] + fn test_backup_cleaning() { + let content = r#" + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + annotations: + meta.helm.sh/release-name: cert-manager-configs + meta.helm.sh/release-namespace: cert-manager + creationTimestamp: "2021-11-04T10:26:27Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: Helm + name: qovery + namespace: qovery + resourceVersion: "28347460" + uid: 509aad5f-db2d-44c3-b03b-beaf144118e2 + spec: + dnsNames: + - 'qovery' + issuerRef: + kind: ClusterIssuer + name: qovery + secretName: qovery + status: + conditions: + - lastTransitionTime: "2021-11-30T15:33:03Z" + message: Certificate is up to date and has not expired + reason: Ready + status: "True" + type: Ready + notAfter: 
"2022-04-29T13:34:51Z" + notBefore: "2022-01-29T13:34:52Z" + renewalTime: "2022-03-30T13:34:51Z" + revision: 3 + "#; + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let mut file_path = create_yaml_backup_file( + tmp_dir.path().to_str().unwrap(), + "test".to_string(), + Some("test".to_string()), + content.to_string(), + ) + .expect("No such file"); + file_path = remove_lines_starting_with(file_path, vec!["resourceVersion", "uid"]).unwrap(); + + let file = OpenOptions::new() + .read(true) + .write(true) + .open(file_path) + .expect("file doesn't exist"); + + let result = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>() + .join("\n"); + + let new_content = r#" + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + annotations: + meta.helm.sh/release-name: cert-manager-configs + meta.helm.sh/release-namespace: cert-manager + creationTimestamp: "2021-11-04T10:26:27Z" + generation: 2 + labels: + app.kubernetes.io/managed-by: Helm + name: qovery + namespace: qovery + spec: + dnsNames: + - 'qovery' + issuerRef: + kind: ClusterIssuer + name: qovery + secretName: qovery + status: + conditions: + - lastTransitionTime: "2021-11-30T15:33:03Z" + message: Certificate is up to date and has not expired + reason: Ready + status: "True" + type: Ready + notAfter: "2022-04-29T13:34:51Z" + notBefore: "2022-01-29T13:34:52Z" + renewalTime: "2022-03-30T13:34:51Z" + revision: 3 + "# + .to_string(); + + assert_eq!(result, new_content); + drop(file); + tmp_dir.close().expect("error closing temporary directory"); + } } diff --git a/test_utilities/src/common.rs b/test_utilities/src/common.rs index a62cfa02..c0e947f7 100644 --- a/test_utilities/src/common.rs +++ b/test_utilities/src/common.rs @@ -623,6 +623,48 @@ pub fn database_test_environment(context: &Context) -> EnvironmentRequest { } } +pub fn database_test_environment_on_upgrade(context: &Context) -> EnvironmentRequest { + let 
suffix = "c3dn5so3dltod3s"; + let application_name = format!("{}-{}", "simple-app".to_string(), &suffix); + + EnvironmentRequest { + execution_id: context.execution_id().to_string(), + id: "c4dn5so3dltod3s".to_string(), + owner_id: "c5dn5so3dltod3s".to_string(), + project_id: "c6dn5so3dltod3s".to_string(), + organization_id: context.organization_id().to_string(), + action: Action::Create, + applications: vec![Application { + long_id: Uuid::from_str("9d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(), + name: application_name, + git_url: "https://github.com/Qovery/engine-testing.git".to_string(), + commit_id: "fc575a2f3be0b9100492c8a463bf18134a8698a5".to_string(), + dockerfile_path: Some("Dockerfile".to_string()), + buildpack_language: None, + root_path: String::from("/"), + action: Action::Create, + git_credentials: Some(GitCredentials { + login: "x-access-token".to_string(), + access_token: "xxx".to_string(), + expired_at: Utc::now(), + }), + storage: vec![], + environment_vars: BTreeMap::default(), + branch: "basic-app-deploy".to_string(), + ports: vec![], + total_cpus: "100m".to_string(), + total_ram_in_mib: 256, + min_instances: 1, + max_instances: 1, + cpu_burst: "100m".to_string(), + advance_settings: Default::default(), + }], + routers: vec![], + databases: vec![], + clone_from_environment_id: None, + } +} + pub fn environment_only_http_server_router_with_sticky_session( context: &Context, test_domain: &str, @@ -1733,3 +1775,226 @@ where Err(e) => Err(e), } } + +pub fn test_db_on_upgrade( + context: Context, + logger: Box, + mut environment: EnvironmentRequest, + secrets: FuncTestsSecrets, + version: &str, + test_name: &str, + db_kind: DatabaseKind, + provider_kind: Kind, + database_mode: DatabaseMode, + is_public: bool, +) -> String { + init(); + + let span = span!(Level::INFO, "test", name = test_name); + let _enter = span.enter(); + let context_for_delete = context.clone_not_same_execution_id(); + + let app_id = 
Uuid::from_str("8d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(); + let database_username = "superuser".to_string(); + let database_password = "uxoyf358jojkemj".to_string(); + let db_kind_str = db_kind.name().to_string(); + let db_id = "c2dn5so3dltod3s".to_string(); + let database_host = format!("{}-{}", db_id, db_kind_str.clone()); + let database_fqdn = format!( + "{}.{}.{}", + database_host, + context.cluster_id(), + secrets + .clone() + .DEFAULT_TEST_DOMAIN + .expect("DEFAULT_TEST_DOMAIN is not set in secrets") + ); + + let db_infos = db_infos( + db_kind.clone(), + db_id.clone(), + database_mode.clone(), + database_username.clone(), + database_password.clone(), + if is_public { + database_fqdn.clone() + } else { + database_host.clone() + }, + ); + let database_port = db_infos.db_port.clone(); + let storage_size = 10; + let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone()); + let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone()); + let db = Database { + kind: db_kind.clone(), + action: Action::Create, + long_id: Uuid::from_str("7d0158db-b783-4bc2-a23b-c7d9228cbe90").unwrap(), + name: db_id.clone(), + version: version.to_string(), + fqdn_id: database_host.clone(), + fqdn: database_fqdn.clone(), + port: database_port.clone(), + username: database_username.clone(), + password: database_password.clone(), + total_cpus: "50m".to_string(), + total_ram_in_mib: 256, + disk_size_in_gib: storage_size.clone(), + database_instance_type: db_instance_type.to_string(), + database_disk_type: db_disk_type.to_string(), + encrypt_disk: true, + activate_high_availability: false, + activate_backups: false, + publicly_accessible: is_public.clone(), + mode: database_mode.clone(), + }; + + environment.databases = vec![db.clone()]; + + let app_name = format!("{}-app-{}", db_kind_str.clone(), generate_id()); + environment.applications = environment + .applications + .into_iter() + .map(|mut app| { + app.long_id = 
app_id.clone(); + app.name = to_short_id(&app_id); + app.branch = app_name.clone(); + app.commit_id = db_infos.app_commit.clone(); + app.ports = vec![Port { + id: "zdf7d6aad".to_string(), + long_id: Default::default(), + port: 1234, + public_port: Some(1234), + name: None, + publicly_accessible: true, + protocol: Protocol::HTTP, + }]; + app.dockerfile_path = Some(format!("Dockerfile-{}", version)); + app.environment_vars = db_infos.app_env_vars.clone(); + app + }) + .collect::>(); + + let mut environment_delete = environment.clone(); + environment_delete.action = Action::Delete; + let ea = environment.clone(); + let ea_delete = environment_delete.clone(); + + let (localisation, kubernetes_version) = match provider_kind { + Kind::Aws => (AWS_TEST_REGION.to_string(), AWS_KUBERNETES_VERSION.to_string()), + Kind::Do => (DO_TEST_REGION.to_string(), DO_KUBERNETES_VERSION.to_string()), + Kind::Scw => (SCW_TEST_ZONE.to_string(), SCW_KUBERNETES_VERSION.to_string()), + }; + + let engine_config = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context, + logger.clone(), + localisation.as_str(), + kubernetes_version.clone(), + &ClusterDomain::Default, + None, + ), + }; + + let ret = environment.deploy_environment(&ea, logger.clone(), &engine_config); + assert!(matches!(ret, TransactionResult::Ok)); + + match database_mode.clone() { + DatabaseMode::CONTAINER => { + match get_pvc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(pvc) => assert_eq!( + pvc.items.expect("No items in pvc")[0].spec.resources.requests.storage, + format!("{}Gi", storage_size) + ), + Err(_) => assert!(false), + }; + + match 
get_svc(context.clone(), provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(svc) => assert_eq!( + svc.items + .expect("No items in svc") + .into_iter() + .filter(|svc| svc.metadata.name == database_host && &svc.spec.svc_type == "LoadBalancer") + .collect::>() + .len(), + match is_public { + true => 1, + false => 0, + } + ), + Err(_) => assert!(false), + }; + } + DatabaseMode::MANAGED => { + match get_svc(context, provider_kind.clone(), environment.clone(), secrets.clone()) { + Ok(svc) => { + let service = svc + .items + .expect("No items in svc") + .into_iter() + .filter(|svc| svc.metadata.name == database_host && svc.spec.svc_type == "ExternalName") + .collect::>(); + let annotations = &service[0].metadata.annotations; + assert_eq!(service.len(), 1); + match is_public { + true => { + assert!(annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")); + assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_fqdn); + } + false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")), + } + } + Err(_) => assert!(false), + }; + } + } + + let engine_config_for_delete = match provider_kind { + Kind::Aws => AWS::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Do => DO::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + Kind::Scw => Scaleway::docker_cr_engine( + &context_for_delete, + logger.clone(), + localisation.as_str(), + kubernetes_version, + &ClusterDomain::Default, + None, + ), + }; + + // let ret = environment_delete.delete_environment(&ea_delete, logger, &engine_config_for_delete); + assert!(matches!(ret, TransactionResult::Ok)); + + return test_name.to_string(); +} diff --git a/tests/aws/aws_databases.rs b/tests/aws/aws_databases.rs index 30346270..c32f608f 100644 --- 
a/tests/aws/aws_databases.rs +++ b/tests/aws/aws_databases.rs @@ -14,7 +14,7 @@ use self::test_utilities::utilities::{ use qovery_engine::io_models::DatabaseMode::{CONTAINER, MANAGED}; use qovery_engine::transaction::TransactionResult; use qovery_engine::utilities::to_short_id; -use test_utilities::common::{test_db, Infrastructure}; +use test_utilities::common::{test_db, test_db_on_upgrade, Infrastructure}; /** ** @@ -664,6 +664,39 @@ fn test_mysql_configuration(version: &str, test_name: &str, database_mode: Datab }) } +#[allow(dead_code)] +fn test_mysql_configuration_on_upgrade(version: &str, test_name: &str, database_mode: DatabaseMode, is_public: bool) { + let secrets = FuncTestsSecrets::new(); + let context = context( + secrets + .AWS_TEST_ORGANIZATION_ID + .as_ref() + .expect("AWS_TEST_ORGANIZATION_ID is not set") + .as_str(), + secrets + .AWS_TEST_CLUSTER_ID + .as_ref() + .expect("AWS_TEST_CLUSTER_ID is not set") + .as_str(), + ); + let environment = test_utilities::common::database_test_environment_on_upgrade(&context); + + engine_run_test(|| { + test_db_on_upgrade( + context, + logger(), + environment, + secrets, + version, + test_name, + DatabaseKind::Mysql, + Kind::Aws, + database_mode, + is_public, + ) + }) +} + // MySQL self-hosted environment #[cfg(feature = "test-aws-self-hosted")] #[named] @@ -687,6 +720,14 @@ fn private_mysql_v8_deploy_a_working_dev_environment() { test_mysql_configuration("8.0", function_name!(), CONTAINER, false); } +#[cfg(feature = "test-aws-self-hosted")] +#[named] +#[test] +#[ignore] +fn private_mysql_v8_deploy_a_working_dev_environment_on_upgrade() { + test_mysql_configuration_on_upgrade("8.0", function_name!(), CONTAINER, false); +} + #[cfg(feature = "test-aws-self-hosted")] #[named] #[test] diff --git a/tests/helm/cert_manager.rs b/tests/helm/cert_manager.rs new file mode 100644 index 00000000..f356bc21 --- /dev/null +++ b/tests/helm/cert_manager.rs @@ -0,0 +1,348 @@ +use base64::decode; +use 
qovery_engine::cloud_provider::helm::{ + deploy_charts_levels, ChartInfo, ChartSetValue, CommonChart, HelmChart, HelmChartNamespaces, +}; +use qovery_engine::cmd::helm::Helm; +use qovery_engine::cmd::kubectl::{kubectl_exec_delete_namespace, kubectl_exec_get_secrets, kubectl_get_resource_yaml}; +use qovery_engine::cmd::structs::SecretItem; +use qovery_engine::fs::list_yaml_backup_files; +use serde_derive::Deserialize; +use serde_derive::Serialize; +use std::fs; +use std::fs::OpenOptions; +use std::io::{BufRead, BufReader}; +use std::path::{Path, PathBuf}; +use std::str::from_utf8; +use std::thread::sleep; +use std::time::Duration; +use tempdir::TempDir; +use test_utilities::utilities::FuncTestsSecrets; + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Certificate { + pub api_version: String, + pub items: Vec, + pub kind: String, + pub metadata: Metadata2, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Item { + pub api_version: String, + pub kind: String, + pub metadata: Metadata, + pub spec: Spec, + pub status: Status, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Metadata { + pub annotations: Annotations, + pub creation_timestamp: String, + pub generation: i64, + pub labels: Labels, + pub name: String, + pub namespace: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Annotations { + #[serde(rename = "meta.helm.sh/release-name")] + pub meta_helm_sh_release_name: String, + #[serde(rename = "meta.helm.sh/release-namespace")] + pub meta_helm_sh_release_namespace: String, + #[serde(default, rename = "kubectl.kubernetes.io/last-applied-configuration")] + pub last_applied_configuration: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 
+#[serde(rename_all = "camelCase")] +pub struct Labels { + #[serde(rename = "app.kubernetes.io/managed-by")] + pub app_kubernetes_io_managed_by: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Spec { + pub dns_names: Vec, + pub issuer_ref: IssuerRef, + pub secret_name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IssuerRef { + pub kind: String, + pub name: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Status {} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Metadata2 { + pub self_link: String, +} + +fn cert_manager_conf() -> (Helm, PathBuf, CommonChart, CommonChart) { + let vault_secrets = FuncTestsSecrets::new(); + let mut kube_config = dirs::home_dir().unwrap(); + kube_config.push(".kube/config"); + let helm = Helm::new(kube_config.to_str().unwrap(), &[]).unwrap(); + let cert_manager = CommonChart { + chart_info: ChartInfo { + name: "cert-manager".to_string(), + path: "lib/common/bootstrap/charts/cert-manager".to_string(), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "installCRDs".to_string(), + value: "true".to_string(), + }, + ChartSetValue { + key: "replicaCount".to_string(), + value: "1".to_string(), + }, + // https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + ChartSetValue { + key: "extraArgs".to_string(), + value: "{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=1.1.1.1:53\\,8.8.8.8:53}" + .to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.enabled".to_string(), + // Due to cycle, prometheus need tls certificate from cert manager, and enabling this will require + // prometheus to be already installed + value: 
"false".to_string(), + }, + ChartSetValue { + key: "prometheus.servicemonitor.prometheusInstance".to_string(), + value: "qovery".to_string(), + }, + ], + ..Default::default() + }, + }; + + let cert_manager_config = CommonChart { + chart_info: ChartInfo { + name: "cert-manager-configs".to_string(), + path: "lib/common/bootstrap/charts/cert-manager-configs".to_string(), + namespace: HelmChartNamespaces::CertManager, + values: vec![ + ChartSetValue { + key: "externalDnsProvider".to_string(), + value: "cloudflare".to_string(), + }, + ChartSetValue { + key: "provider.cloudflare.apiToken".to_string(), + value: vault_secrets.CLOUDFLARE_TOKEN.unwrap().to_string(), + }, + ChartSetValue { + key: "provider.cloudflare.email".to_string(), + value: vault_secrets.CLOUDFLARE_ID.as_ref().unwrap().to_string(), + }, + ChartSetValue { + key: "acme.letsEncrypt.emailReport".to_string(), + value: vault_secrets.CLOUDFLARE_ID.unwrap().to_string(), + }, + ChartSetValue { + key: "acme.letsEncrypt.acmeUrl".to_string(), + value: "https://acme-staging-v02.api.letsencrypt.org/directory".to_string(), + }, + ], + ..Default::default() + }, + }; + + (helm, kube_config, cert_manager, cert_manager_config) +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_create_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + let backup_infos = helm + .prepare_chart_backup(root_dir_path, &cert_manager.chart_info, &vec![], 
vec!["cert".to_string()]) + .unwrap(); + let secrets = kubectl_exec_get_secrets( + kube_config.as_path(), + cert_manager.chart_info.namespace.to_string().as_str(), + "", + vec![], + ) + .unwrap(); + assert_eq!(backup_infos.len(), 1); + + for backup_info in backup_infos { + let backup_name = format!("{}-{}-q-backup", &cert_manager.chart_info.name, backup_info.name.clone()); + assert!(Path::new(backup_info.path.as_str()).exists()); + let secret = secrets + .items + .clone() + .into_iter() + .filter(|secret| secret.metadata.name == backup_name) + .collect::>(); + let secret_content = decode(secret[0].data[&backup_info.name].clone()).unwrap(); + let content = from_utf8(secret_content.as_slice()).unwrap().to_string(); + let file = OpenOptions::new().read(true).open(backup_info.path.as_str()).unwrap(); + let file_content = BufReader::new(file.try_clone().unwrap()) + .lines() + .map(|line| line.unwrap()) + .collect::>() + .join("\n"); + assert_ne!(content.len(), 0); + assert_ne!(file_content.len(), 0); + assert!(content.contains(&file_content)); + } + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_apply_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + let _ = helm + .prepare_chart_backup( + root_dir_path, + cert_manager_config.get_chart_info(), + &vec![], + vec!["cert".to_string()], + ) + .unwrap(); 
+ + match helm.apply_chart_backup(root_dir_path, &vec![], cert_manager_config.get_chart_info()) { + Err(_) => { + assert!(false) + } + Ok(..) => { + let string_path = list_yaml_backup_files(root_dir_path).unwrap().first().unwrap().clone(); + let str_path = string_path.as_str(); + let path = Path::new(str_path); + let backup_string = fs::read_to_string(path).unwrap(); + let cert_string = kubectl_get_resource_yaml( + kube_config.as_path(), + vec![], + "cert", + Some(cert_manager_config.namespace().as_str()), + ) + .unwrap(); + let backup_cert = serde_yaml::from_str::(backup_string.as_str()).unwrap(); + let cert = serde_yaml::from_str::(cert_string.as_str()).unwrap(); + assert_eq!(backup_cert.items.first().unwrap().spec, cert.items.first().unwrap().spec) + } + }; + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_should_not_create_chart_backup() { + let (helm, kube_config, cert_manager, cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + let tmp_dir = TempDir::new("workspace_directory").expect("error creating temporary dir"); + let root_dir_path = Path::new(tmp_dir.path()); + + // trying to create a backup from an unknown (toto) resource + let backup_infos = helm + .prepare_chart_backup(root_dir_path, &cert_manager.chart_info, &vec![], vec!["toto".to_string()]) + .unwrap(); + + assert_eq!(backup_infos.len(), 0); + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} + +#[cfg(feature = "test-with-kube")] +#[test] +fn test_should_apply_chart_backup() { + let 
(helm, kube_config, cert_manager, mut cert_manager_config) = cert_manager_conf(); + + let lvl_1: Vec> = vec![Box::new(cert_manager.clone())]; + let lvl_2: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_1], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2], false).map_err(|_| assert!(false)); + + sleep(Duration::from_secs(30)); + + cert_manager_config.chart_info.backup_resources = Some(vec!["cert".to_string()]); + + let lvl_2_bis: Vec> = vec![Box::new(cert_manager_config.clone())]; + + let _ = deploy_charts_levels(kube_config.as_path(), &vec![], vec![lvl_2_bis], false).map_err(|_| assert!(false)); + + let secrets = kubectl_exec_get_secrets( + kube_config.as_path(), + cert_manager.chart_info.namespace.to_string().as_str(), + "", + vec![], + ) + .unwrap(); + + let cert_secret = secrets + .items + .into_iter() + .filter(|secret| secret.metadata.name == "cert-manager-configs-cert-q-backup") + .collect::>(); + + assert_eq!(cert_secret.len(), 0); + + let cert_string = kubectl_get_resource_yaml( + kube_config.as_path(), + vec![], + "cert", + Some(cert_manager_config.namespace().as_str()), + ) + .unwrap(); + let cert = serde_yaml::from_str::(cert_string.as_str()).unwrap(); + + assert_ne!(cert.items[0].metadata.annotations.last_applied_configuration, ""); + + let _ = kubectl_exec_delete_namespace(kube_config.as_path(), "cert-manager", vec![]); +} diff --git a/tests/helm/mod.rs b/tests/helm/mod.rs new file mode 100644 index 00000000..3a720572 --- /dev/null +++ b/tests/helm/mod.rs @@ -0,0 +1 @@ +mod cert_manager; diff --git a/tests/lib.rs b/tests/lib.rs index bbc13eb3..48aaa913 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -3,4 +3,5 @@ extern crate maplit; mod aws; mod digitalocean; +mod helm; mod scaleway;