feat: adding engine autoscaler and prometheus-adapter

This commit is contained in:
Pierre Mavro
2021-02-26 10:16:56 +01:00
committed by Pierre Mavro
parent 9b92e3fd8c
commit 5fcdd2ef6f
39 changed files with 1338 additions and 11 deletions

View File

@@ -0,0 +1,61 @@
# Installs the prometheus-adapter Helm chart (Custom Metrics API backend for
# HPA-based engine autoscaling) into the Prometheus operator's namespace.
resource "helm_release" "prometheus-adapter" {
  name      = "prometheus-adapter"
  chart     = "common/charts/prometheus-adapter"
  namespace = helm_release.prometheus_operator.namespace
  # Roll back automatically if the release fails to deploy.
  atomic      = true
  max_history = 50

  // Dummy, always-changing value: forces a diff on every apply so Terraform
  // retries the release even after a previously failed (atomic) deployment.
  set {
    name  = "fake"
    value = timestamp()
  }

  set {
    name  = "metricsRelistInterval"
    value = "30s"
  }

  # In-cluster Prometheus endpoint created by the Prometheus operator.
  set {
    name  = "prometheus.url"
    value = "prometheus-operated.${helm_release.prometheus_operator.namespace}.svc"
  }

  # PDB
  set {
    name  = "podDisruptionBudget.enabled"
    value = "true"
  }

  set {
    name  = "podDisruptionBudget.maxUnavailable"
    value = "1"
  }

  # Limits (requests == limits gives the pod Guaranteed QoS)
  set {
    name  = "resources.limits.cpu"
    value = "100m"
  }

  set {
    name  = "resources.requests.cpu"
    value = "100m"
  }

  set {
    name  = "resources.limits.memory"
    value = "128Mi"
  }

  set {
    name  = "resources.requests.memory"
    value = "128Mi"
  }

  depends_on = [
    aws_eks_cluster.eks_cluster,
    helm_release.aws_vpc_cni,
    helm_release.prometheus_operator,
  ]
}

View File

@@ -115,5 +115,6 @@ resource "helm_release" "qovery_engine_resources" {
aws_eks_cluster.eks_cluster,
helm_release.aws_vpc_cni,
helm_release.cluster_autoscaler,
helm_release.prometheus-adapter,
]
}

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,20 @@
apiVersion: v1
appVersion: v0.8.3
description: A Helm chart for k8s prometheus adapter
home: https://github.com/DirectXMan12/k8s-prometheus-adapter
keywords:
- hpa
- metrics
- prometheus
- adapter
maintainers:
- email: mattias.gees@jetstack.io
  name: mattiasgees
- name: steven-sheehy
- email: hfernandez@mesosphere.com
  name: hectorj2f
name: prometheus-adapter
sources:
- https://github.com/kubernetes/charts
- https://github.com/DirectXMan12/k8s-prometheus-adapter
version: 2.12.1

View File

@@ -0,0 +1,147 @@
# Prometheus Adapter
Installs the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) for the Custom Metrics API. Custom metrics are used in Kubernetes by [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale workloads based upon your own metric pulled from an external metrics provider like Prometheus. This chart complements the [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server) chart that provides resource only metrics.
## Prerequisites
Kubernetes 1.14+
## Get Repo Info
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm 3
$ helm install [RELEASE_NAME] prometheus-community/prometheus-adapter
# Helm 2
$ helm install --name [RELEASE_NAME] prometheus-community/prometheus-adapter
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
# Helm 3
$ helm uninstall [RELEASE_NAME]
# Helm 2
$ helm delete --purge [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
# Helm 3 or 2
$ helm upgrade [RELEASE_NAME] [CHART] --install
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
```console
# Helm 2
$ helm inspect values prometheus-community/prometheus-adapter
# Helm 3
$ helm show values prometheus-community/prometheus-adapter
```
### Prometheus Service Endpoint
To use the chart, ensure the `prometheus.url` and `prometheus.port` are configured with the correct Prometheus service endpoint. If Prometheus is exposed under HTTPS the host's CA Bundle must be exposed to the container using `extraVolumes` and `extraVolumeMounts`.
### Adapter Rules
Additionally, the chart comes with a set of default rules out of the box but they may pull in too many metrics or not map them correctly for your needs. Therefore, it is recommended to populate `rules.custom` with a list of rules (see the [config document](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md) for the proper format).
### Horizontal Pod Autoscaler Metrics
Finally, to configure your Horizontal Pod Autoscaler to use the custom metric, see the custom metrics section of the [HPA walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics).
The Prometheus Adapter can serve three different [metrics APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis):
### Custom Metrics
Enabling this option will cause custom metrics to be served at `/apis/custom.metrics.k8s.io/v1beta1`. Enabled by default when `rules.default` is true, but can be customized by populating `rules.custom`:
```yaml
rules:
custom:
- seriesQuery: '{__name__=~"^some_metric_count$"}'
resources:
template: <<.Resource>>
name:
matches: ""
as: "my_custom_metric"
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
```
### External Metrics
Enabling this option will cause external metrics to be served at `/apis/external.metrics.k8s.io/v1beta1`. Can be enabled by populating `rules.external`:
```yaml
rules:
external:
- seriesQuery: '{__name__=~"^some_metric_count$"}'
resources:
template: <<.Resource>>
name:
matches: ""
as: "my_external_metric"
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
```
### Resource Metrics
Enabling this option will cause resource metrics to be served at `/apis/metrics.k8s.io/v1beta1`. Resource metrics will allow pod CPU and Memory metrics to be used in [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) as well as the `kubectl top` command. Can be enabled by populating `rules.resource`:
```yaml
rules:
resource:
cpu:
containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>)
nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
memory:
containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
window: 3m
```
**NOTE:** Setting a value for `rules.resource` will also deploy the resource metrics API service, providing the same functionality as [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server). As such it is not possible to deploy them both in the same cluster.

View File

@@ -0,0 +1,9 @@
# prometheus-adapter override values: expose one external metric used by the
# engine autoscaler (consumed through the external.metrics.k8s.io API).
rules:
external:
- seriesQuery: '{__name__=~"^some_metric_count$"}'
resources:
template: <<.Resource>>
name:
matches: ""
# NOTE(review): this rule sits under `external` but is exposed as
# "my_custom_metric" (the chart's external example uses "my_external_metric");
# confirm HPA consumers expect this exact metric name.
as: "my_custom_metric"
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)

View File

@@ -0,0 +1,43 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Named helper templates for the prometheus-adapter chart: chart/release
naming, the chart label, and the service-account name.
*/}}
{{/*
Expand the name of the chart.
*/}}
{{- define "k8s-prometheus-adapter.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "k8s-prometheus-adapter.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "k8s-prometheus-adapter.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
(falls back to "default" when serviceAccount.create is false and no name set).
*/}}
{{- define "k8s-prometheus-adapter.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "k8s-prometheus-adapter.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,48 @@
{{/*
cert-manager chain for the adapter's serving certificate:
self-signed Issuer -> root CA Certificate -> CA Issuer -> serving Certificate.
Rendered only when certManager.enabled is true.
*/}}
{{- if .Values.certManager.enabled -}}
---
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer
spec:
selfSigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert
spec:
secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert
duration: {{ .Values.certManager.caCertDuration }}
issuerRef:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer
commonName: "ca.webhook.prometheus-adapter"
isCA: true
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer
spec:
ca:
secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert
---
# Finally, generate a serving certificate for the apiservices to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-cert
spec:
{{- /* Secret name equals the release fullname so the Deployment mounts it as volume-serving-cert. */}}
secretName: {{ template "k8s-prometheus-adapter.fullname" . }}
duration: {{ .Values.certManager.certDuration }}
issuerRef:
name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer
dnsNames:
- {{ template "k8s-prometheus-adapter.fullname" . }}
- {{ template "k8s-prometheus-adapter.fullname" . }}.{{ .Release.Namespace }}
- {{ template "k8s-prometheus-adapter.fullname" . }}.{{ .Release.Namespace }}.svc
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{/*
Binds the adapter's ServiceAccount to the built-in system:auth-delegator
ClusterRole so the extension API server can delegate authn/authz checks.
*/}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-system-auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{/*
Binds the adapter's ServiceAccount to the chart's resource-reader ClusterRole
(defined in cluster-role-resource-reader.yaml).
*/}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,23 @@
{{/*
ClusterRole letting the adapter discover core objects (namespaces, pods,
services, configmaps) it needs to associate metrics with resources.
*/}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- configmaps
verbs:
- get
- list
- watch
{{- end -}}

View File

@@ -0,0 +1,96 @@
{{/*
ConfigMap holding the adapter's rule configuration (mounted as
/etc/adapter/config.yaml by the Deployment). Skipped entirely when
rules.existing points at a pre-generated ConfigMap.
NOTE(review): the "-}}" chomping around the default/custom/external/resource
sections is whitespace-sensitive; verify the rendered config.yaml indentation
with `helm template` before changing any of it.
*/}}
{{- if not .Values.rules.existing -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.yaml: |
{{- if or .Values.rules.default .Values.rules.custom }}
rules:
{{- if .Values.rules.default }}
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters: []
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m]))
by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_seconds_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m]))
by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
seriesFilters:
- isNot: ^container_.*_total$
resources:
overrides:
namespace:
resource: namespace
pod:
resource: pod
name:
matches: ^container_(.*)$
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_total$
resources:
template: <<.Resource>>
name:
matches: ""
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_seconds_total
resources:
template: <<.Resource>>
name:
matches: ^(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters: []
resources:
template: <<.Resource>>
name:
matches: ^(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>)
{{- end -}}
{{- if .Values.rules.custom }}
{{ toYaml .Values.rules.custom | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.rules.external }}
externalRules:
{{ toYaml .Values.rules.external | indent 4 }}
{{- end -}}
{{- if .Values.rules.resource }}
resourceRules:
{{ toYaml .Values.rules.resource | indent 6 }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,32 @@
{{/*
APIService registering the adapter as the backend for
custom.metrics.k8s.io/v1beta1 when default or custom rules are enabled.
Uses apiregistration v1 when available, else the v1beta1 fallback.
*/}}
{{- if or .Values.rules.default .Values.rules.custom }}
{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }}
apiVersion: apiregistration.k8s.io/v1
{{- else }}
apiVersion: apiregistration.k8s.io/v1beta1
{{- end }}
kind: APIService
metadata:
{{- if .Values.certManager.enabled }}
annotations:
{{- /* Both annotation spellings kept for old (certmanager.k8s.io) and new (cert-manager.io) CA injector versions. */}}
certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
{{- end }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: v1beta1.custom.metrics.k8s.io
spec:
service:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
{{- if .Values.tls.enable }}
caBundle: {{ b64enc .Values.tls.ca }}
{{- end }}
group: custom.metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }}
groupPriorityMinimum: 100
versionPriority: 100
{{- end }}

View File

@@ -0,0 +1,23 @@
{{- /*
This if must be aligned with custom-metrics-cluster-role.yaml
as otherwise this binding will point to not existing role.
*/ -}}
{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}}
{{- /* Grants the adapter's ServiceAccount access to the chart's server-resources ClusterRole (custom metrics). */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{/*
ClusterRole granting full access to custom.metrics.k8s.io resources; the
enabling condition must stay aligned with the matching -hpa-controller binding.
*/}}
{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources
rules:
- apiGroups:
- custom.metrics.k8s.io
resources: ["*"]
verbs: ["*"]
{{- end -}}

View File

@@ -0,0 +1,131 @@
{{/*
Deployment running the prometheus-adapter container. The checksum/config
annotation forces a pod rollout whenever the rules ConfigMap changes.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ template "k8s-prometheus-adapter.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.podLabels }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
name: {{ template "k8s-prometheus-adapter.name" . }}
annotations:
{{- /* Hash of the rendered ConfigMap: any rule change triggers a rolling restart. */}}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
{{- if .Values.hostNetwork.enabled }}
hostNetwork: true
{{- end }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- end}}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /adapter
- --secure-port={{ .Values.listenPort }}
{{- if or .Values.tls.enable .Values.certManager.enabled }}
- --tls-cert-file=/var/run/serving-cert/tls.crt
- --tls-private-key-file=/var/run/serving-cert/tls.key
{{- end }}
- --cert-dir=/tmp/cert
- --logtostderr=true
{{- /* prometheus.url is rendered through tpl, so it may reference other values. */}}
- --prometheus-url={{ tpl .Values.prometheus.url . }}{{ if .Values.prometheus.port }}:{{ .Values.prometheus.port }}{{end}}{{ .Values.prometheus.path }}
- --metrics-relist-interval={{ .Values.metricsRelistInterval }}
- --v={{ .Values.logLevel }}
- --config=/etc/adapter/config.yaml
{{- if .Values.extraArguments }}
{{- toYaml .Values.extraArguments | trim | nindent 8 }}
{{- end }}
ports:
- containerPort: {{ .Values.listenPort }}
name: https
livenessProbe:
httpGet:
path: /healthz
port: https
scheme: HTTPS
initialDelaySeconds: 30
readinessProbe:
httpGet:
path: /healthz
port: https
scheme: HTTPS
initialDelaySeconds: 30
{{- if .Values.resources }}
resources:
{{- toYaml .Values.resources | nindent 10 }}
{{- end }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{ toYaml . | indent 8 }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
{{- /* NOTE(review): Kubernetes capability names are conventionally upper-case ("ALL"); confirm "all" is honored by the target API server. */}}
drop: ["all"]
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 10001
volumeMounts:
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }}
{{ end }}
- mountPath: /etc/adapter/
name: config
readOnly: true
- mountPath: /tmp
name: tmp
{{- if or .Values.tls.enable .Values.certManager.enabled }}
- mountPath: /var/run/serving-cert
name: volume-serving-cert
readOnly: true
{{- end }}
nodeSelector:
{{- toYaml .Values.nodeSelector | nindent 8 }}
affinity:
{{- toYaml .Values.affinity | nindent 8 }}
priorityClassName: {{ .Values.priorityClassName }}
tolerations:
{{- toYaml .Values.tolerations | nindent 8 }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
volumes:
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | trim | nindent 6 }}
{{ end }}
- name: config
configMap:
{{- /* rules.existing overrides the chart-managed ConfigMap name. */}}
name: {{ .Values.rules.existing | default (include "k8s-prometheus-adapter.fullname" . ) }}
- name: tmp
emptyDir: {}
{{- if or .Values.tls.enable .Values.certManager.enabled }}
- name: volume-serving-cert
secret:
secretName: {{ template "k8s-prometheus-adapter.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,32 @@
{{/*
APIService registering the adapter as the backend for
external.metrics.k8s.io/v1beta1 when external rules are configured.
*/}}
{{- if .Values.rules.external }}
{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }}
apiVersion: apiregistration.k8s.io/v1
{{- else }}
apiVersion: apiregistration.k8s.io/v1beta1
{{- end }}
kind: APIService
metadata:
{{- if .Values.certManager.enabled }}
annotations:
certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
{{- end }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: v1beta1.external.metrics.k8s.io
spec:
service:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
{{- if .Values.tls.enable }}
caBundle: {{ b64enc .Values.tls.ca }}
{{- end }}
group: external.metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }}
groupPriorityMinimum: 100
versionPriority: 100
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{/*
Lets the HPA controller (kube-system horizontal-pod-autoscaler SA) read
external metrics through the chart's -external-metrics ClusterRole.
*/}}
{{- if and .Values.rbac.create .Values.rules.external -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-external-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "k8s-prometheus-adapter.name" . }}-external-metrics
subjects:
- kind: ServiceAccount
name: horizontal-pod-autoscaler
namespace: kube-system
{{- end -}}

View File

@@ -0,0 +1,20 @@
{{/*
ClusterRole allowing read access to all external.metrics.k8s.io resources.
*/}}
{{- if and .Values.rbac.create .Values.rules.external -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-external-metrics
rules:
- apiGroups:
- "external.metrics.k8s.io"
resources:
- "*"
verbs:
- list
- get
- watch
{{- end -}}

View File

@@ -0,0 +1,22 @@
{{/*
PodDisruptionBudget for the adapter pods.
NOTE(review): minAvailable and maxUnavailable are mutually exclusive in the
Kubernetes API — set only one of them in values. policy/v1beta1 is removed in
Kubernetes 1.25+; confirm target cluster version before relying on this.
*/}}
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ template "k8s-prometheus-adapter.name" . }}
release: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,68 @@
{{/*
PodSecurityPolicy plus the ClusterRole/ClusterRoleBinding letting the
adapter's ServiceAccount "use" it.
NOTE(review): PodSecurityPolicy is removed in Kubernetes 1.25+; confirm the
target cluster version before enabling psp.create.
*/}}
{{- if .Values.psp.create -}}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.hostNetwork.enabled }}
hostNetwork: true
{{- end }}
fsGroup:
rule: RunAsAny
runAsGroup:
rule: RunAsAny
runAsUser:
{{- /* Non-root range covering the Deployment's runAsUser: 10001. */}}
rule: MustRunAs
ranges:
- min: 1024
max: 65535
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- emptyDir
- configMap
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-psp
rules:
- apiGroups:
- 'policy'
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "k8s-prometheus-adapter.fullname" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "k8s-prometheus-adapter.name" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,32 @@
{{/*
APIService registering the adapter as the backend for metrics.k8s.io/v1beta1
(resource metrics) when resource rules are configured. This conflicts with a
separately deployed metrics-server.
*/}}
{{- if .Values.rules.resource}}
{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }}
apiVersion: apiregistration.k8s.io/v1
{{- else }}
apiVersion: apiregistration.k8s.io/v1beta1
{{- end }}
kind: APIService
metadata:
{{- if .Values.certManager.enabled }}
annotations:
certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }}
{{- end }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: v1beta1.metrics.k8s.io
spec:
service:
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
{{- if .Values.tls.enable }}
caBundle: {{ b64enc .Values.tls.ca }}
{{- end }}
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }}
groupPriorityMinimum: 100
versionPriority: 100
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{/*
Binds the adapter's ServiceAccount to the chart's -metrics ClusterRole so it
can read the pod/node data needed to serve resource metrics.
*/}}
{{- if and .Values.rbac.create .Values.rules.resource -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "k8s-prometheus-adapter.name" . }}-metrics
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,22 @@
{{/*
ClusterRole allowing the adapter to read pods, nodes and node stats for the
resource metrics API.
*/}}
{{- if and .Values.rbac.create .Values.rules.resource -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-metrics
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
{{- end -}}

View File

@@ -0,0 +1,20 @@
{{/*
Lets the adapter read the extension-apiserver-authentication ConfigMap in
kube-system (required by extension API servers to validate client requests).
*/}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.name" . }}-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
{{- end -}}

View File

@@ -0,0 +1,15 @@
{{/*
TLS Secret for the adapter's serving certificate when certificates are
supplied directly via values (tls.enable) rather than cert-manager.
*/}}
{{- if .Values.tls.enable -}}
apiVersion: v1
kind: Secret
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.fullname" . }}
type: kubernetes.io/tls
data:
tls.crt: {{ b64enc .Values.tls.certificate }}
tls.key: {{ b64enc .Values.tls.key }}
{{- end -}}

View File

@@ -0,0 +1,22 @@
{{/*
Service exposing the adapter's HTTPS port; this is the endpoint the
APIService objects route metrics API requests to.
*/}}
apiVersion: v1
kind: Service
metadata:
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.service.port }}
protocol: TCP
targetPort: https
selector:
app: {{ template "k8s-prometheus-adapter.name" . }}
release: {{ .Release.Name }}
type: {{ .Values.service.type }}
View File

@@ -0,0 +1,12 @@
{{/*
ServiceAccount for the adapter pods (name resolved by the
serviceAccountName helper; skipped when serviceAccount.create is false).
*/}}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "k8s-prometheus-adapter.name" . }}
chart: {{ template "k8s-prometheus-adapter.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,176 @@
# Default values for k8s-prometheus-adapter.
affinity: {}
image:
  repository: directxman12/k8s-prometheus-adapter-amd64
  tag: v0.8.3
  pullPolicy: IfNotPresent
# Adapter log verbosity (higher = chattier)
logLevel: 4
# How often the adapter re-discovers available metric series in Prometheus
metricsRelistInterval: 1m
# HTTPS port the adapter's API server listens on
listenPort: 6443
nodeSelector: {}
priorityClassName: ""
# Url to access prometheus
prometheus:
  # Value is templated
  url: http://prometheus.default.svc
  port: 9090
  # Optional URL path prefix when Prometheus is served under a sub-path
  path: ""
# Number of adapter replicas
replicas: 1
rbac:
  # Specifies whether RBAC resources should be created
  create: true
psp:
  # Specifies whether PSP resources should be created
  create: false
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
# Custom DNS configuration to be added to prometheus-adapter pods
dnsConfig: {}
  # nameservers:
  # - 1.2.3.4
  # searches:
  # - ns1.svc.cluster-domain.example
  # - my.dns.search.suffix
  # options:
  # - name: ndots
  #   value: "2"
  # - name: edns0
resources: {}
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
# Metric discovery/translation rules exposed by the adapter
rules:
  default: true
  custom: []
  # - seriesQuery: '{__name__=~"^some_metric_count$"}'
  #   resources:
  #     template: <<.Resource>>
  #   name:
  #     matches: ""
  #     as: "my_custom_metric"
  #   metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
  # Mounts a configMap with pre-generated rules for use. Overrides the
  # default, custom, external and resource entries
  existing:
  external: []
  # - seriesQuery: '{__name__=~"^some_metric_count$"}'
  #   resources:
  #     template: <<.Resource>>
  #   name:
  #     matches: ""
  #     as: "my_external_metric"
  #   metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
  resource: {}
  # cpu:
  #   containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>)
  #   nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>)
  #   resources:
  #     overrides:
  #       instance:
  #         resource: node
  #       namespace:
  #         resource: namespace
  #       pod:
  #         resource: pod
  #   containerLabel: container
  # memory:
  #   containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
  #   nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
  #   resources:
  #     overrides:
  #       instance:
  #         resource: node
  #       namespace:
  #         resource: namespace
  #       pod:
  #         resource: pod
  #   containerLabel: container
  # window: 3m
service:
  annotations: {}
  port: 443
  type: ClusterIP
# Static TLS material for the APIService endpoint (used when enable is true).
# The `|-` blocks below are placeholder content, not comments — replace with
# real PEM when enabling.
tls:
  enable: false
  ca: |-
    # Public CA file that signed the APIService
  key: |-
    # Private key of the APIService
  certificate: |-
    # Public key of the APIService
# Any extra arguments
extraArguments: []
  # - --tls-private-key-file=/etc/tls/tls.key
  # - --tls-cert-file=/etc/tls/tls.crt
# Any extra volumes
extraVolumes: []
  # - name: example-name
  #   hostPath:
  #     path: /path/on/host
  #     type: DirectoryOrCreate
  # - name: ssl-certs
  #   hostPath:
  #     path: /etc/ssl/certs/ca-bundle.crt
  #     type: File
# Any extra volume mounts
extraVolumeMounts: []
  # - name: example-name
  #   mountPath: /path/in/container
  # - name: ssl-certs
  #   mountPath: /etc/ssl/certs/ca-certificates.crt
  #   readOnly: true
tolerations: []
# Labels added to the pod
podLabels: {}
# Annotations added to the pod
podAnnotations: {}
hostNetwork:
  # Specifies if prometheus-adapter should be started in hostNetwork mode.
  #
  # You would require this enabled if you use alternate overlay networking for pods and
  # API server unable to communicate with metrics-server. As an example, this is required
  # if you use Weave network on EKS. See also dnsPolicy
  enabled: false
  # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet
  # dnsPolicy: ClusterFirstWithHostNet
podDisruptionBudget:
  # Specifies if PodDisruptionBudget should be enabled
  # When enabled, minAvailable or maxUnavailable should also be defined.
  enabled: false
  minAvailable:
  maxUnavailable: 1
# NOTE(review): presumably lets cert-manager provision the APIService certs
# instead of the static tls.* values above — verify against the chart templates
certManager:
  enabled: false
  caCertDuration: 43800h
  certDuration: 8760h

View File

@@ -0,0 +1,25 @@
{{- /*
HorizontalPodAutoscaler for the qovery-engine StatefulSet, driven by the
custom pod metric `taskmanager_nb_running_tasks` (served to the HPA through
the custom-metrics API, i.e. the prometheus-adapter installed alongside).
NOTE(review): averageValue is 1m (0.001 tasks per pod), so a single running
task pushes the desired replica count to max_replicas — confirm this
aggressive scale-out is intended rather than `averageValue: 1` (one task
per pod).
NOTE(review): autoscaling/v2beta2 is deprecated on newer clusters; fine for
the Kubernetes versions targeted here, but revisit when upgrading.
*/}}
{{- if .Values.autoscaler.enabled }}
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: qovery-engine
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "qovery-engine.labels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: StatefulSet
    name: qovery-engine
  minReplicas: {{ .Values.autoscaler.min_replicas }}
  maxReplicas: {{ .Values.autoscaler.max_replicas }}
  # todo: k8s 1.18 is required to support scaling policies
  metrics:
    - type: Pods
      pods:
        metric:
          name: taskmanager_nb_running_tasks
        target:
          type: AverageValue
          averageValue: 1m
{{- end }}

View File

@@ -1,14 +1,12 @@
{{ if gt .Values.replicaCount 1.0 }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: qovery-engine
namespace: qovery
namespace: {{ .Values.namespace }}
labels:
app: qovery-engine
{{- include "qovery-engine.labels" . | nindent 4 }}
spec:
minAvailable: "50%"
selector:
matchLabels:
app: qovery-engine
{{ end }}
app.kubernetes.io/instance: qovery-engine

View File

@@ -0,0 +1,16 @@
{{- /*
ClusterIP Service exposing the qovery-engine metrics port so Prometheus can
scrape it (via the companion ServiceMonitor).
NOTE(review): the selector matches only
app.kubernetes.io/instance=qovery-engine, which assumes the engine pods
carry exactly that instance label — verify against the StatefulSet's
"qovery-engine.labels" helper.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: qovery-engine
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "qovery-engine.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  ports:
    # Service port and container targetPort are deliberately the same value
    - name: {{ .Values.metrics.portName }}
      port: {{ .Values.metrics.port }}
      targetPort: {{ .Values.metrics.port }}
      protocol: TCP
  selector:
    app.kubernetes.io/instance: qovery-engine

View File

@@ -0,0 +1,20 @@
{{- if .Values.metrics.enabled }}
{{- /*
ServiceMonitor instructing the Prometheus Operator to scrape the
qovery-engine metrics Service on its named metrics port.
*/}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: qovery-engine
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "qovery-engine.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "qovery-engine.labels" . | nindent 6 }}
  namespaceSelector:
    matchNames:
      # Must match the namespace the engine Service is actually created in
      # (.Values.namespace), not .Release.Namespace — the two can differ,
      # and a mismatch leaves this monitor with no scrape targets.
      - {{ .Values.namespace | quote }}
  endpoints:
    - port: {{ .Values.metrics.portName }}
      interval: 30s
      scrapeTimeout: 5s
{{- end }}

View File

@@ -6,8 +6,11 @@ metadata:
namespace: {{ .Values.namespace }}
labels:
{{- include "qovery-engine.labels" . | nindent 4 }}
{{- range $key, $val := .Values.labels }}
{{ $key }}: {{ $val }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
replicas: {{ .Values.autoscaler.min_replicas }}
serviceName: qovery-engine
selector:
matchLabels:
@@ -45,6 +48,9 @@ spec:
- name: qovery-engine
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: metrics
containerPort: {{ .Values.metrics.port }}
env:
{{ range $key, $value := .Values.environmentVariables -}}
- name: "{{ $key }}"

View File

@@ -2,23 +2,31 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2
image:
repository: qoveryrd/engine
pullPolicy: IfNotPresent
tag: ""
namespace: "qovery"
metrics:
enabled: true
portName: "metrics"
port: 8080
imagePullSecrets: []
autoscaler:
enabled: true
min_replicas: 2
max_replicas: 10
labels:
app: qovery-engine
environmentVariables:
RUST_LOG: DEBUG,rusoto_core::request=info,hyper=info
#ENGINE_RES_URL: ""
RUST_LOG: DEBUG,rusoto_core::request=info,trust_dns_resolver=info,trust_dns_proto=info,reqwest::connect=info,hyper=info
#LIB_ROOT_DIR: ""
#DOCKER_HOST: ""
#QOVERY_NATS_URL: ""
#ORGANIZATION: ""
#CLOUD_PROVIDER: ""

View File

@@ -0,0 +1,60 @@
// Installs the prometheus-adapter chart next to the Prometheus operator.
// The adapter serves Prometheus series through the Kubernetes custom-metrics
// API, which the engine HPA consumes.
resource "helm_release" "prometheus-adapter" {
  name = "prometheus-adapter"
  chart = "common/charts/prometheus-adapter"
  // Deploy into the same namespace as the Prometheus operator release
  namespace = helm_release.prometheus_operator.namespace
  atomic = true
  max_history = 50

  // Fake, always-changing argument: with `atomic`, a failed release is rolled
  // back and Terraform would otherwise detect no diff on the next plan;
  // timestamp() forces an update every apply so the release can be retried.
  set {
    name = "fake"
    value = timestamp()
  }

  // Re-discover available metric series in Prometheus every 30s
  // (chart default is 1m)
  set {
    name = "metricsRelistInterval"
    value = "30s"
  }

  // Point the adapter at the operator-managed Prometheus service
  set {
    name = "prometheus.url"
    value = "prometheus-operated.${helm_release.prometheus_operator.namespace}.svc"
  }

  # PDB: allow at most one adapter pod to be voluntarily evicted at a time
  set {
    name = "podDisruptionBudget.enabled"
    value = "true"
  }

  set {
    name = "podDisruptionBudget.maxUnavailable"
    value = "1"
  }

  # Limits: requests are set equal to limits for both CPU and memory
  set {
    name = "resources.limits.cpu"
    value = "100m"
  }

  set {
    name = "resources.requests.cpu"
    value = "100m"
  }

  set {
    name = "resources.limits.memory"
    value = "128Mi"
  }

  set {
    name = "resources.requests.memory"
    value = "128Mi"
  }

  depends_on = [
    digitalocean_kubernetes_cluster.kubernetes_cluster,
    helm_release.prometheus_operator,
  ]
}

View File

@@ -112,6 +112,7 @@ resource "helm_release" "qovery_engine_resources" {
}
depends_on = [
digitalocean_kubernetes_cluster.kubernetes_cluster
digitalocean_kubernetes_cluster.kubernetes_cluster,
helm_release.prometheus-adapter,
]
}

View File

@@ -64,6 +64,9 @@ charts:
version: 0.1.3
dest: do-bootstrap
repo_name: do-k8s-token-rotate
- name: prometheus-adapter
version: 2.12.1
repo_name: prometheus-community
repos:
- name: stable
@@ -84,6 +87,8 @@ repos:
url: https://kubernetes.github.io/autoscaler
- name: do-k8s-token-rotate
url: https://qovery.github.io/do-k8s-token-rotate/
- name: prometheus-community
url: https://prometheus-community.github.io/helm-charts
destinations:
- name: default