Merge remote-tracking branch 'origin/dev' into dev

This commit is contained in:
Romaric Philogene
2020-11-15 16:40:04 +01:00
28 changed files with 1206 additions and 3060 deletions

1
Cargo.lock generated
View File

@@ -2626,6 +2626,7 @@ dependencies = [
"env_logger",
"qovery-engine",
"rand 0.7.3",
"reqwest 0.10.8",
"serde",
"serde_derive",
"serde_json",

View File

@@ -241,14 +241,14 @@ initChownData:
# Administrator credentials when not using an existing secret (see below)
adminUser: {{ grafana_admin_user }}
adminPassword: {{ grafana_admin_password }}
adminUser: "{{ grafana_admin_user }}"
adminPassword: "{{ grafana_admin_password }}"
# Use an existing secret for the admin user.
admin:
existingSecret: ""
userKey: admin-user
passwordKey: tbd
passwordKey: admin-password
## Define command to be executed at startup by grafana container
@@ -493,8 +493,8 @@ smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: ""
userKey: "qovery"
passwordKey: "tbd"
userKey: "user"
passwordKey: "password"
## Sidecars that collect the configmaps with the specified label and store the included files into the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards

View File

@@ -0,0 +1,40 @@
# Installs the Pleco chart (automatic cleanup of TTL-tagged cloud resources)
# into kube-system. Pleco is only useful on ephemeral test clusters, so the
# release is skipped entirely on non-test clusters via count.
resource "helm_release" "pleco" {
# Deploy exactly one release when test_cluster is anything other than "false".
count = var.test_cluster == "false" ? 0 : 1
name = "pleco"
chart = "common/charts/pleco"
namespace = "kube-system"
# Roll the release back automatically if the deployment fails.
atomic = true
max_history = 50
// make a fake arg to avoid TF to validate update on failure because of the atomic option
set {
name = "fake"
value = timestamp()
}
# AWS credentials/region for Pleco's cleanup API calls. The {{ ... }}
# placeholders are presumably rendered by the engine's templating step before
# Terraform runs — confirm against the caller. The values land in the chart's
# environmentVariables Secret.
set {
name = "environmentVariables.AWS_ACCESS_KEY_ID"
value = "{{ aws_access_key }}"
}
set {
name = "environmentVariables.AWS_SECRET_ACCESS_KEY"
value = "{{ aws_secret_key }}"
}
set {
name = "environmentVariables.AWS_DEFAULT_REGION"
value = "{{ aws_region }}"
}
set {
name = "environmentVariables.LOG_LEVEL"
value = "debug"
}
# Wait for the EKS cluster and its CNI add-on before installing the chart.
depends_on = [
aws_eks_cluster.eks_cluster,
helm_release.aws_vpc_cni,
]
}

View File

@@ -25,7 +25,7 @@ variable "vpc_cidr_block" {
variable "test_cluster" {
description = "Is this a test cluster?"
default = "false"
default = "{{ test_cluster }}"
type = string
}

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,9 @@
# Helm chart metadata for Pleco (https://github.com/Qovery/pleco).
# appVersion tracks the default container image tag (see values.yaml:
# image.plecoImageTag overrides it); version is the chart's own version.
apiVersion: v2
appVersion: 0.1.7
description: Automatically removes Cloud managed services and Kubernetes resources
based on tags with TTL
home: https://github.com/Qovery/pleco
icon: https://github.com/Qovery/pleco/raw/main/assets/pleco_logo.png
name: pleco
type: application
version: 0.1.7

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
Defaults to .Chart.Name, overridable via .Values.nameOverride; truncated to
63 chars (Kubernetes DNS label limit) with any trailing "-" stripped.
*/}}
{{- define "kubernetes.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride, then the release name alone (when it
already contains the chart name), then "<release>-<chart>".
*/}}
{{- define "kubernetes.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced with "_" because label values may not contain "+".
*/}}
{{- define "kubernetes.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every resource in the chart: chart identity,
selector labels, optional app version, and the managing tool.
*/}}
{{- define "kubernetes.labels" -}}
helm.sh/chart: {{ include "kubernetes.chart" . }}
{{ include "kubernetes.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels — the immutable subset used by Deployment selectors; must
stay stable across chart upgrades.
*/}}
{{- define "kubernetes.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubernetes.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use.
When the chart creates the SA, default its name to the fullname; otherwise
fall back to the namespace "default" SA unless a name is given.
*/}}
{{- define "kubernetes.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kubernetes.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,65 @@
# Pleco Deployment: runs "pleco start". LOG_LEVEL and CHECK_INTERVAL are
# forwarded as CLI flags (with defaults), DRY_RUN toggles the --dry-run flag,
# and in addition EVERY key of .Values.environmentVariables is exposed as an
# env var sourced from the chart's Secret (which shares the fullname).
# $kubefullname is captured here because "include" needs the top-level
# context, which is unavailable inside the range loop below.
{{- $kubefullname := include "kubernetes.fullname" . }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubernetes.fullname" . }}
labels:
{{- include "kubernetes.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "kubernetes.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "kubernetes.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kubernetes.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
# plecoImageTag takes precedence; falls back to the chart's appVersion.
image: "{{ .Values.image.repository }}:{{ .Values.image.plecoImageTag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: [ "pleco", "start" ]
args:
- --level
- {{ .Values.environmentVariables.LOG_LEVEL | default "info" }}
- --check-interval
# Quoted so the numeric default renders as a YAML string.
- "{{ .Values.environmentVariables.CHECK_INTERVAL | default 120 }}"
# NOTE(review): {{ if }} / {{ end }} here are not whitespace-chomped
# ({{- ... -}}), so blank lines are emitted into the args list — valid
# YAML, but worth confirming the rendered output is as intended.
{{ if .Values.environmentVariables.DRY_RUN }}
- --dry-run
{{ end }}
env:
{{ range $key, $value := .Values.environmentVariables -}}
- name: "{{ $key }}"
valueFrom:
secretKeyRef:
name: {{ $kubefullname }}
key: {{ $key }}
{{ end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,9 @@
# Opaque Secret holding every key/value from .Values.environmentVariables
# verbatim (stringData: values are supplied as plain text). The Deployment
# template reads each key back via secretKeyRef using the same fullname.
apiVersion: v1
kind: Secret
metadata:
name: {{ include "kubernetes.fullname" . }}
labels:
{{- include "kubernetes.labels" . | nindent 4 }}
type: Opaque
stringData:
{{- toYaml .Values.environmentVariables | nindent 2 }}

View File

@@ -0,0 +1,12 @@
# ServiceAccount for the Pleco pods. Rendered only when
# .Values.serviceAccount.create is true; its name comes from the
# kubernetes.serviceAccountName helper, optional annotations from values.
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kubernetes.serviceAccountName" . }}
labels:
{{- include "kubernetes.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,54 @@
# Default values for the Pleco chart.
replicaCount: 1
image:
repository: qoveryrd/pleco
pullPolicy: IfNotPresent
# Overrides the image tag; when empty the chart's appVersion is used.
plecoImageTag: "v0.1.7"
# Rendered into the chart's Secret, and the keys below are also forwarded as
# CLI flags by the Deployment template.
environmentVariables:
# Seconds between cleanup passes (passed as --check-interval).
CHECK_INTERVAL: "120"
# Any non-empty value adds the --dry-run flag to the container args.
DRY_RUN: "true"
LOG_LEVEL: "info"
# AWS credentials are expected to be injected at deploy time (e.g. via
# Terraform "set" blocks) rather than committed here.
# AWS_ACCESS_KEY_ID: ""
# AWS_SECRET_ACCESS_KEY: ""
# AWS_DEFAULT_REGION: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# Requests equal limits, so the pod gets the Guaranteed QoS class.
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,3 +1,4 @@
{{- $kubefullname := include "kubernetes.fullname" . }}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -37,7 +38,7 @@ spec:
- name: "{{ $key }}"
valueFrom:
secretKeyRef:
name: qovery-agent
name: {{ $kubefullname }}
key: {{ $key }}
{{ end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}

View File

@@ -1,3 +1,4 @@
{{- $kubefullname := include "kubernetes.fullname" . }}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -36,7 +37,7 @@ spec:
- name: "{{ $key }}"
valueFrom:
secretKeyRef:
name: qovery-engine
name: {{ $kubefullname }}
key: {{ $key }}
{{ end }}
resources:

View File

@@ -1,540 +0,0 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride
# namespaceOverride: my-global-namespace
image:
## Bitnami MongoDB registry
##
registry: docker.io
## Bitnami MongoDB image name
##
repository: bitnami/mongodb
## Bitnami MongoDB image tag
## ref: https://hub.docker.com/r/bitnami/mongodb/tags/
##
tag: "{{ version }}"
## Specify a imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information on logs
## It turns on Bitnami debugging in minideb-extras-base
## ref: https://github.com/bitnami/minideb-extras-base
debug: true
## String to partially override mongodb.fullname template (will maintain the release name)
##
# nameOverride:
nameOverride: {{ name }}
## String to fully override mongodb.fullname template
##
# fullnameOverride:
fullnameOverride: {{ name }}
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Add custom extra environment variables to all the MongoDB containers
# extraEnvVars:
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources:
limits: {}
requests:
cpu: "{{ total_cpus }}"
memory: "{{ total_ram_in_mib }}Mi"
## Enable authentication
## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/
#
usePassword: true
# existingSecret: name-of-existing-secret
## MongoDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run
##
mongodbRootPassword: {{ database_password }}
## MongoDB custom user and database
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run
##
mongodbUsername: {{ database_login }}
mongodbPassword: {{ database_password }}
mongodbDatabase: {{ name }}
## Whether enable/disable IPv6 on MongoDB
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6
##
mongodbEnableIPv6: false
## Whether enable/disable DirectoryPerDB on MongoDB
## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb
##
mongodbDirectoryPerDB: false
## MongoDB System Log configuration
## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level
##
mongodbSystemLogVerbosity: 0
mongodbDisableSystemLog: false
## MongoDB additional command line flags
##
## Can be used to specify command line flags, for example:
##
## mongodbExtraFlags:
## - "--wiredTigerCacheSizeGB=2"
mongodbExtraFlags: []
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Kubernetes Cluster Domain
clusterDomain: cluster.local
## Kubernetes service type
service:
## Specify an explicit service name.
# name: svc-mongo
## Provide any additional annotations which may be required.
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
type: LoadBalancer
# clusterIP: None
port: {{ private_port }}
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Specify the externalIP value ClusterIP service type.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
# externalIPs: []
## Specify the loadBalancerIP value for LoadBalancer service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
##
# loadBalancerIP:
## Specify the loadBalancerSourceRanges value for LoadBalancer service types.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges: []
## Use StatefulSet instead of Deployment when deploying standalone
useStatefulSet: true
## Setting up replication
## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication
#
replicaSet:
## Whether to create a MongoDB replica set for high availability or not
enabled: false
useHostnames: true
## Name of the replica set
##
name: rs0
## Key used for replica set authentication
##
# key: key
## Number of replicas per each node type
##
replicas:
secondary: 1
arbiter: 1
## Pod Disruption Budget
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
pdb:
enabled: true
minAvailable:
secondary: 1
arbiter: 1
# maxUnavailable:
# secondary: 1
# arbiter: 1
# Annotations to be added to the deployment or statefulsets
annotations: {}
# Additional labels to apply to the deployment or statefulsets
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ name }}
# Annotations to be added to MongoDB pods
podAnnotations: {}
# Additional pod labels to apply
podLabels: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Define separate resources per arbiter, which are less than primary or secondary
# used only when replica set is enabled
resourcesArbiter: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 256Mi
## Pod priority
## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: ""
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Define Separate nodeSelector for secondaries
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelectorSecondary: {}
## Define Separate nodeSelector for arbiter
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelectorArbiter: {}
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Define separate affinity for arbiter pod
affinityArbiter: {}
## Tolerations
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Array to add extra volumes
##
extraVolumes: []
## Array to add extra mounts (normally used with extraVolumes)
##
extraVolumeMounts: []
## Add sidecars to the arbiter pod
# used only when replica set is enabled
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecarsArbiter: []
## Array to add extra volumes to the arbiter
# used only when replica set is enabled
##
extraVolumesArbiter: []
## Array to add extra mounts (normally used with extraVolumes) to the arbiter
# used only when replica set is enabled
##
extraVolumeMountsArbiter: []
## updateStrategy for MongoDB Primary, Secondary and Arbitrer statefulsets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
## The path the volume will be mounted at, useful when using different
## MongoDB images.
##
mountPath: /bitnami/mongodb
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
##
subPath: ""
## mongodb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# storageClassSecondary: "-"
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ database_disk_size_in_gib }}Gi
annotations:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ name }}
## Configure the ingress resource that allows you to access the
## MongoDB installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: mongodb.local
path: /
## The tls configuration for the ingress
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
tls:
- hosts:
- mongodb.local
secretName: mongodb.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: airflow.local-tls
# key:
# certificate:
## Configure the options for init containers to be run before the main app containers
## are started. All init containers are run sequentially and must exit without errors
## for the next one to be started.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
# extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
# Define custom config map with init scripts
initConfigMap: {}
# name: "init-config-map"
## Entries for the MongoDB config file. For documentation of all options, see:
## http://docs.mongodb.org/manual/reference/configuration-options/
##
configmap:
# # where and how to store data.
# storage:
# dbPath: /bitnami/mongodb/data/db
# journal:
# enabled: true
# directoryPerDB: false
# # where to write logging data.
# systemLog:
# destination: file
# quiet: false
# logAppend: true
# logRotate: reopen
# path: /opt/bitnami/mongodb/logs/mongodb.log
# verbosity: 0
# # network interfaces
# net:
# port: 27017
# unixDomainSocket:
# enabled: true
# pathPrefix: /opt/bitnami/mongodb/tmp
# ipv6: false
# bindIpAll: true
# # replica set options
# #replication:
# #replSetName: replicaset
# #enableMajorityReadConcern: true
# # process management options
# processManagement:
# fork: false
# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid
# # set parameter options
# setParameter:
# enableLocalhostAuthBypass: true
# # security options
# security:
# authorization: disabled
# #keyFile: /opt/bitnami/mongodb/conf/keyfile
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/mongodb-exporter
tag: 0.11.0-debian-10-r45
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String with extra arguments to the metrics exporter
## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go
extraArgs: ""
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Metrics exporter liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
## Metrics exporter pod Annotation
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9216"
## Prometheus Service Monitor
## ref: https://github.com/coreos/prometheus-operator
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
serviceMonitor:
## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
enabled: false
## Specify a namespace if needed
# namespace: monitoring
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}
## Specify Metric Relabellings to add to the scrape endpoint
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
# relabellings:
alerting:
## Define individual alerting rules as required
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup
## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
rules: {}
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}

View File

@@ -1,600 +0,0 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami MySQL image
## ref: https://hub.docker.com/r/bitnami/mysql/tags/
##
image:
debug: false
registry: docker.io
repository: bitnami/mysql
tag: "{{ version }}"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override mysql.fullname template (will maintain the release name)
##
# nameOverride:
nameOverride: {{ name }}
## String to fully override mysql.fullname template
##
# fullnameOverride:
fullnameOverride: {{ name }}
## Cluster domain
##
clusterDomain: cluster.local
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ name }}
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 128Mi
requests:
cpu: "{{ database_total_cpus }}"
memory: "{{ database_ram_size_in_mib }}Mi"
## Use existing secret (ignores root, db and replication passwords)
##
# existingSecret:
## Admin (root) credentials
##
root:
## MySQL admin password
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run
##
password: {{ database_password }}
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount admin password as a file instead of using an environment variable
##
injectSecretsAsVolume: true
## Custom user/db credentials
##
db:
## MySQL username and password
## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-user-on-first-run
## Note that this user should be different from the MySQL replication user (replication.user)
##
user: {{ database_login }}
password: {{ database_password }}
## Database to create
## ref: https://github.com/bitnami/bitnami-docker-mysql#creating-a-database-on-first-run
##
name: {{ name }}
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount replication user password as a file instead of using an environment variable
##
injectSecretsAsVolume: true
## Replication configuration
##
replication:
## Enable replication. This enables the creation of replicas of MySQL. If false, only a
## master deployment would be created
##
enabled: false
##
## MySQL replication user
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster
## Note that this user should be different from the MySQL user (db.user)
##
user: replicator
## MySQL replication user password
## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster
##
password:
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
## If it is not force, a random password will be generated.
##
forcePassword: true
## Mount replication user password as a file instead of using an environment variable
##
injectSecretsAsVolume: false
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
#
## ConfigMap with scripts to be run at first boot
## Note: This will override initdbScripts
# initdbScriptsConfigMap:
serviceAccount:
create: true
## Specify the name of the service account created/used
# name:
## Master nodes parameters
##
master:
## Configure MySQL with a custom my.cnf file
## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
##
config: |-
[mysqld]
default_authentication_plugin=mysql_native_password
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
plugin_dir=/opt/bitnami/mysql/plugin
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
## updateStrategy for master nodes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
extraEnvVars:
## ConfigMap with extra env vars:
##
extraEnvVarsCM:
## Secret with extra env vars:
##
extraEnvVarsSecret:
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## MySQL master pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## MySQL master containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## Example:
## containerSecurityContext:
## capabilities:
## drop: ["NET_RAW"]
## readOnlyRootFilesystem: true
##
containerSecurityContext: {}
## MySQL master containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 250m
# memory: 256Mi
requests: {}
# cpu: 250m
# memory: 256Mi
## MySQL master containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Enable persistence using PVCs on master nodes
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
mountPath: /bitnami/mysql
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
storageClass: "aws-ebs-gp2-0"
## PVC annotations
##
annotations:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ name }}
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: {{ database_disk_size_in_gib }}Gi
## Use an existing PVC
##
# existingClaim:
## Slave nodes parameters
##
slave:
## Number of slave replicas
##
replicas: 2
## Configure MySQL slave with a custom my.cnf file
  ## ref: https://dev.mysql.com/doc/refman/8.0/en/option-files.html
##
config: |-
[mysqld]
default_authentication_plugin=mysql_native_password
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
## updateStrategy for slave nodes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
##
updateStrategy:
type: RollingUpdate
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
extraEnvVars:
## ConfigMap with extra env vars:
##
extraEnvVarsCM:
## Secret with extra env vars:
##
extraEnvVarsSecret:
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## MySQL slave pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## MySQL slave containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## Example:
## containerSecurityContext:
## capabilities:
## drop: ["NET_RAW"]
## readOnlyRootFilesystem: true
##
containerSecurityContext: {}
## MySQL slave containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 250m
# memory: 256Mi
requests: {}
# cpu: 250m
# memory: 256Mi
## MySQL slave containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
## Initializing the database could take some time
##
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Enable persistence using PVCs on slave nodes
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## If true, use a Persistent Volume Claim, If false, use emptyDir
##
enabled: true
mountPath: /bitnami/mysql
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## PVC annotations
##
annotations: {}
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: {{ database_disk_size_in_gib }}Gi
## Use an existing PVC
##
# existingClaim:
## MySQL Service properties
##
service:
## MySQL Service type
##
type: LoadBalancer
## MySQL Service port
##
port: 3306
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort:
master:
slave:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ fqdn }}"
external-dns.alpha.kubernetes.io/ttl: "300"
  ## loadBalancerIP for the MySQL Service (optional, cloud specific)
  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
  ##
# loadBalancerIP:
# master:
# slave:
## MySQL prometheus metrics parameters
## ref: https://github.com/prometheus/mysqld_exporter
##
metrics:
enabled: false
## Bitnami MySQL Prometheus exporter image
## ref: https://hub.docker.com/r/bitnami/mysqld-exporter/tags/
##
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-10-r127
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## MySQL Prometheus exporter containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 0.5
# memory: 256Mi
requests: {}
# cpu: 0.5
# memory: 256Mi
## MySQL Prometheus exporter service parameters
##
service:
type: ClusterIP
port: 9104
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9104"
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
enabled: false
## Namespace in which Prometheus is running
##
# namespace: monitoring
## Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# interval: 10s
## Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
# scrapeTimeout: 10s
## ServiceMonitor selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
##
# selector:
# prometheus: my-prometheus

View File

@@ -1,505 +0,0 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
nameOverride: {{ name }}
fullnameOverride: {{ name }}
global:
postgresql: {}
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
##
image:
registry: docker.io
repository: bitnami/postgresql
tag: "{{ version }}"
pullPolicy: IfNotPresent
## Set to true if you would like to see extra information on logs
## It turns BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: true
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container Security Context
## Note: the chown of the data folder is done to securityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
  ## When runAsUser is set to special value "auto", init container will try to chown the
## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
##
securityContext:
runAsUser: 0
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
enabled: false
## Name of an already existing service account. Setting this value disables the automatic service account creation.
# name:
## Pod Security Policy
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
psp:
create: false
## Creates role for ServiceAccount
## Required for PSP
rbac:
create: true
replication:
enabled: false
user: repl_user
password: repl_password
slaveReplicas: 2
## Set synchronous commit mode: on, off, remote_apply, remote_write and local
## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
synchronousCommit: "on"
## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
## NOTE: It cannot be > slaveReplicas
numSynchronousReplicas: 1
## Replication Cluster application name. Useful for defining multiple replication policies
applicationName: my_application
## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
postgresqlPostgresPassword: {{ database_password }}
## PostgreSQL user (has superuser privileges if username is `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
postgresqlUsername: {{ database_login }}
## PostgreSQL password
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
##
postgresqlPassword: {{ database_password }}
## PostgreSQL password using existing secret
## existingSecret: secret
## Mount PostgreSQL secret as a file instead of passing environment variable
# usePasswordFile: false
## Create a database
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
##
# postgresqlDatabase:
postgresqlDatabase: {{ name }}
## PostgreSQL data dir
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
postgresqlDataDir: /bitnami/postgresql/data
## An array to add extra environment variables
## For example:
## extraEnv:
## - name: FOO
## value: "bar"
##
# extraEnv:
extraEnv: []
## Name of a ConfigMap containing extra env vars
##
# extraEnvVarsCM:
## Specify extra initdb args
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbArgs:
## Specify a custom location for the PostgreSQL transaction log
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbWalDir:
## PostgreSQL configuration
## Specify runtime configuration parameters as a dict, using camelCase, e.g.
## {"sharedBuffers": "500MB"}
## Alternatively, you can put your postgresql.conf under the files/ directory
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
# postgresqlConfiguration:
## PostgreSQL extended configuration
## As above, but _appended_ to the main configuration
## Alternatively, you can put your *.conf under the files/conf.d/ directory
## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
# postgresqlExtendedConf:
## PostgreSQL client authentication configuration
## Specify content for pg_hba.conf
## Default: do not create pg_hba.conf
## Alternatively, you can put your pg_hba.conf under the files/ directory
# pgHbaConfiguration: |-
# local all all trust
# host all all localhost trust
# host mydatabase mysuser 192.168.0.0/24 md5
## ConfigMap with PostgreSQL configuration
## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
# configurationConfigMap:
## ConfigMap with PostgreSQL extended configuration
# extendedConfConfigMap:
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
## Specify the PostgreSQL username and password to execute the initdb scripts
initdbUser: postgres
initdbPassword: {{ database_password }}
## ConfigMap with scripts to be run at first boot
## NOTE: This will override initdbScripts
# initdbScriptsConfigMap:
## Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
# initdbScriptsSecret:
## Optional duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
# terminationGracePeriodSeconds: 30
## LDAP configuration
##
ldap:
enabled: false
url: ""
server: ""
port: ""
prefix: ""
suffix: ""
baseDN: ""
bindDN: ""
bind_password:
search_attr: ""
search_filter: ""
scheme: ""
tls: false
## PostgreSQL service configuration
service:
type: ClusterIP
# clusterIP: None
port: 5432
annotations: {}
## Start master and slave(s) pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes)
## limit `/dev/shm` to `64M` (see e.g. the
## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
## [containerd issue](https://github.com/containerd/containerd/issues/3654),
## which could be not enough if PostgreSQL uses parallel workers heavily.
##
shmVolume:
## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
## this limitation.
##
enabled: true
  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
  ## This option is ignored if `volumePermissions.enabled` is `false`
##
chmod:
enabled: true
## PostgreSQL data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
##
# existingClaim:
mountPath: /bitnami/postgresql
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
##
subPath: ""
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ database_disk_size_in_gib }}Gi
annotations:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ id }}
databaseName: {{ name }}
## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
##
## PostgreSQL Master parameters
##
master:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels:
ownerId: {{ owner_id }}
envId: {{ environment_id }}
databaseId: {{ name }}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
## Additional PostgreSQL Master Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Master Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Override the service configuration for master
##
service: {}
# type:
# nodePort:
# clusterIP:
service_nlb:
fqdn: "{{ fqdn }}"
##
## PostgreSQL Slave parameters
##
slave:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
## Extra init containers
## Example
##
## extraInitContainers:
## - name: do-something
## image: busybox
## command: ['do', 'something']
extraInitContainers: []
## Additional PostgreSQL Slave Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Slave Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Override the service configuration for slave
##
service: {}
# type:
# nodePort:
# clusterIP:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: "{{ database_ram_size_in_mib }}Mi" # TODO customizable?
cpu: "{{ database_total_cpus }}"
## Add annotations to all the deployed resources
##
commonAnnotations: {}
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port PostgreSQL is listening
## on. When true, PostgreSQL will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
  ## and that match the other criteria (i.e. carry the correct client label) can reach the DB.
## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
##
## Example:
## explicitNamespacesSelector:
## matchLabels:
## role: frontend
## matchExpressions:
## - {key: role, operator: In, values: [frontend]}
explicitNamespacesSelector: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure metrics exporter
##
metrics:
enabled: false
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
loadBalancerIP:
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.8.0-debian-10-r116
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Define additional custom metrics
## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
# customMetrics:
# pg_database:
# query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
# metrics:
# - name:
# usage: "LABEL"
# description: "Name of the database"
# - size_bytes:
# usage: "GAUGE"
# description: "Size of the database in bytes"
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
## Configure extra options for liveness and readiness probes
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1

View File

@@ -1,712 +0,0 @@
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
redis: {}
## Bitnami Redis image version
## ref: https://hub.docker.com/r/bitnami/redis/tags/
##
image:
registry: docker.io
repository: bitnami/redis
## Bitnami Redis image tag
## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links
##
tag: 6.0.4-debian-10-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override redis.fullname template (will maintain the release name)
##
# nameOverride:
nameOverride: {{ databases[database_index]['id'] }}-{{ environment_id }}
## String to fully override redis.fullname template
##
# fullnameOverride:
fullnameOverride: {{ databases[database_index]['id'] }}-{{ environment_id }}
## Cluster settings
cluster:
enabled: false
slaveCount: 3
## Use redis sentinel in the redis pod. This will disable the master and slave services and
## create one redis service with ports to the sentinel and the redis instances
sentinel:
enabled: false
## Require password authentication on the sentinel itself
## ref: https://redis.io/topics/sentinel
usePassword: true
## Bitnami Redis Sentintel image version
## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
##
image:
registry: docker.io
repository: bitnami/redis-sentinel
## Bitnami Redis image tag
## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
##
tag: 6.0.4-debian-10-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
masterSet: mymaster
initialCheckTimeout: 5
quorum: 2
downAfterMilliseconds: 60000
failoverTimeout: 18000
parallelSyncs: 1
port: 26379
## Additional Redis configuration for the sentinel nodes
## ref: https://redis.io/topics/config
##
configmap:
## Enable or disable static sentinel IDs for each replicas
## If disabled each sentinel will generate a random id at startup
## If enabled, each replicas will have a constant ID on each start-up
##
staticID: false
## Configure extra options for Redis Sentinel liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
customLivenessProbe: {}
customReadinessProbe: {}
## Redis Sentinel resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis Sentinel Service properties
service:
## Redis Sentinel Service type
type: ClusterIP
sentinelPort: 26379
redisPort: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# sentinelNodePort:
# redisNodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
## Specifies the Kubernetes Cluster's Domain Name.
##
clusterDomain: cluster.local
networkPolicy:
## Specifies whether a NetworkPolicy should be created
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port Redis is listening
## on. When true, Redis will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
  ## Allow connections from other namespaces. Just set a label on the namespace and a label on the pods (optional).
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
rbac:
## Specifies whether RBAC resources should be created
##
create: false
role:
## Rules to create. It follows the role specification
# rules:
# - apiGroups:
# - extensions
# resources:
# - podsecuritypolicies
# verbs:
# - use
# resourceNames:
# - gce.unprivileged
rules: []
## Redis pod Security Context
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## sysctl settings for master and slave pods
##
## Uncomment the setting below to increase the net.core.somaxconn value
##
# sysctls:
# - name: net.core.somaxconn
# value: "10000"
## Use password authentication
usePassword: true
## Redis password (both master and slave)
## Defaults to a random 10-character alphanumeric string if not set and usePassword is true
## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run
##
password: {{ databases[database_index]['password'] }}
## Use existing secret (ignores previous password)
# existingSecret:
## Password key to be retrieved from Redis secret
##
# existingSecretPasswordKey:
## Mount secrets as files instead of environment variables
usePasswordFile: false
## Persist data to a persistent volume (Redis Master)
persistence:
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
existingClaim:
# Redis port
redisPort: {{ databases[database_index]['port'] }}
##
## Redis Master parameters
##
master:
## Redis command arguments
##
## Can be used to specify command line arguments, for example:
##
command: "/run.sh"
## Additional Redis configuration for the master nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis additional command line flags
##
## Can be used to specify command line flags, for example:
##
## extraFlags:
## - "--maxmemory-policy volatile-ttl"
## - "--repl-backlog-size 1024mb"
extraFlags: []
## Comma-separated list of Redis commands to disable
##
## Can be used to disable Redis commands for security reasons.
## Commands will be completely disabled by renaming each to an empty string.
## ref: https://redis.io/topics/security#disabling-of-specific-commands
##
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Master additional pod labels and annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
## Values are quoted: Kubernetes label values must be strings, and the
## templated ids would otherwise be emitted as bare scalars (possibly numeric).
podLabels:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ databases[database_index]['name'] }}"
podAnnotations: {}
## Redis Master resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Configure extra options for Redis Master liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
## Configure custom probes for other images, like
## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
##
# customLivenessProbe:
# tcpSocket:
# port: 6379
# initialDelaySeconds: 10
# periodSeconds: 5
# customReadinessProbe:
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 5
# exec:
# command:
# - "container-entrypoint"
# - "bash"
# - "-c"
# - "redis-cli set liveness-probe \"`date`\" | grep OK"
customLivenessProbe: {}
customReadinessProbe: {}
## Redis Master Node selectors and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
##
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Redis Master pod/node affinity/anti-affinity
##
affinity: {}
## Redis Master Service properties
service:
## Redis Master Service type
type: LoadBalancer
port: {{ databases[database_index]['port'] }}
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
external-dns.alpha.kubernetes.io/hostname: "{{ databases[database_index]['fqdn'] }}"
external-dns.alpha.kubernetes.io/ttl: "300"
## Quoted: service label values must be strings; unquoted templated ids could
## render as numbers and be rejected by the API server.
labels:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ databases[database_index]['name'] }}"
# loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
storageClass: "aws-ebs-gp2-0"
accessModes:
- ReadWriteOnce
size: {{ databases[database_index]['disk_size_in_mb'] }}Mi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Quoted: annotation values must be strings; unquoted templated ids could
## render as bare (possibly numeric) scalars.
annotations:
  ownerId: "{{ owner_id }}"
  envId: "{{ environment_id }}"
  databaseId: "{{ databases[database_index]['name'] }}"
## Update strategy, can be set to RollingUpdate or onDelete by default.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
## Redis Master pod priorityClassName
##
priorityClassName: {}
##
## Redis Slave properties
## Note: service.type is a mandatory parameter
## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master
##
slave:
## Slave Service properties
service:
## Redis Slave Service type
type: ClusterIP
## Redis port
port: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Redis slave port
port: 6379
## Can be used to specify command line arguments, for example:
##
command: "/run.sh"
## Additional Redis configuration for the slave nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis extra flags
extraFlags: []
## List of Redis commands to disable
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Slave pod/node affinity/anti-affinity
##
affinity: {}
## Kubernetes Spread Constraints for pod assignment
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
# - maxSkew: 1
# topologyKey: node
# whenUnsatisfiable: DoNotSchedule
spreadConstraints: {}
## Configure extra options for Redis Slave liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
## Configure custom probes for other images, like
## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
##
# customLivenessProbe:
# tcpSocket:
# port: 6379
# initialDelaySeconds: 10
# periodSeconds: 5
# customReadinessProbe:
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 5
# exec:
# command:
# - "container-entrypoint"
# - "bash"
# - "-c"
# - "redis-cli set liveness-probe \"`date`\" | grep OK"
customLivenessProbe: {}
customReadinessProbe: {}
## Redis slave Resource
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis slave selectors and tolerations for pod assignment
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Redis slave pod Annotation and Labels
podLabels: {}
podAnnotations: {}
## Redis slave pod priorityClassName
# priorityClassName: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to, useful in dev environments
## and one PV for multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Update strategy, can be set to RollingUpdate or onDelete by default.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.6.1-debian-10-r15
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Extra arguments for Metrics exporter, for example:
## extraArgs:
## check-keys: myKey,myOtherKey
# extraArgs: {}
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9121"
# podLabels: {}
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
## Specify a namespace if needed
# namespace: monitoring
# fallback to the prometheus default unless specified
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
{% raw %}
## Redis prometheus rules
## These are just examples rules, please adapt them to your needs.
## Make sure to constrain the rules to the current Redis service.
# rules:
# - alert: RedisDown
# expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
# for: 2m
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $labels.instance }}" }} down
# description: Redis instance {{ "{{ $labels.instance }}" }} is down
# - alert: RedisMemoryHigh
# expr: >
# redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
# /
# redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
# > 90 <= 100
# for: 2m
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $labels.instance }}" }} is using too much memory
# description: |
# Redis instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
# - alert: RedisKeyEviction
# expr: |
# increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
# for: 1s
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $labels.instance }}" }} has evicted keys
# description: |
# Redis instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
{% endraw %}
rules: []
## Metrics exporter pod priorityClassName
# priorityClassName: {}
service:
type: ClusterIP
## Use serviceLoadBalancerIP to request a specific static IP,
## otherwise leave blank
# loadBalancerIP:
annotations: {}
labels: {}
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## Redis config file
## ref: https://redis.io/topics/config
##
configmap: |-
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
## Sysctl InitContainer
## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings)
sysctlImage:
enabled: false
command: []
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
mountHostSys: false
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## PodSecurityPolicy configuration
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
## Specifies whether a PodSecurityPolicy should be created
##
create: false

View File

@@ -54,10 +54,13 @@ charts:
repo_name: bitnami
version: 10.6.17
dest: services
- name: pleco
version: 0.1.8
repo_name: pleco
repos:
- name: stable
url: https://kubernetes-charts.storage.googleapis.com
url: https://charts.helm.sh/stable
- name: jetstack
url: https://charts.jetstack.io
- name: bitnami
@@ -68,6 +71,8 @@ repos:
url: https://aws.github.io/eks-charts
- name: loki
url: https://grafana.github.io/loki/charts
- name: pleco
url: https://qovery.github.io/pleco/
destinations:
- name: default

View File

@@ -255,7 +255,7 @@ impl BuildPlatform for LocalDocker {
return Err(self.engine_error(
EngineErrorCause::User(
"It looks like your Dockerfile is wrong. Did you consider building \
your container locally using `qovery run` or `docker build`?",
your container locally using `qovery run` or `docker build --no-cache`?",
),
format!(
"error while building container image {}. Error: {:?}",

View File

@@ -186,6 +186,13 @@ impl<'a> EKS<'a> {
.map(|x| format!("{}", x.clone().to_string()))
.collect();
let managed_dns_resolvers_terraform_format = terraform_list_format(managed_dns_resolvers);
let test_cluster = match self.context.metadata() {
Some(meta) => match meta.test {
Some(true) => true,
_ => false,
},
_ => false,
};
let mut context = TeraContext::new();
// Qovery
@@ -199,6 +206,7 @@ impl<'a> EKS<'a> {
"agent_version_controller_token",
&self.options.agent_version_controller_token,
);
context.insert("test_cluster", &test_cluster);
// DNS configuration
context.insert("managed_dns", &managed_dns_list);
@@ -226,12 +234,9 @@ impl<'a> EKS<'a> {
context.insert("dns_email_report", &self.options.tls_email_report); // Pierre suggested renaming to tls_email_report
// TLS
let lets_encrypt_url = match self.context.metadata() {
Some(meta) => match meta.test {
Some(true) => "https://acme-staging-v02.api.letsencrypt.org/directory",
_ => "https://acme-v02.api.letsencrypt.org/directory",
},
_ => "https://acme-v02.api.letsencrypt.org/directory",
let lets_encrypt_url = match &test_cluster {
true => "https://acme-staging-v02.api.letsencrypt.org/directory",
false => "https://acme-v02.api.letsencrypt.org/directory",
};
context.insert("acme_server_url", lets_encrypt_url);

View File

@@ -67,7 +67,7 @@ pub fn terraform_exec_with_init_validate_plan_apply(
terraform_exec_with_init_validate_plan(root_dir, first_time_init_terraform);
// terraform apply
if dry_run {
if !dry_run {
terraform_exec(root_dir, vec!["apply", "-auto-approve", "tf_plan"])?;
}

View File

@@ -497,6 +497,37 @@ impl<'a> Transaction<'a> {
let applications = apps_result.ok().unwrap();
applications_by_environment.insert(target_environment, applications);
// build as well the failover environment, retention could remove the application image
match environment_action {
EnvironmentAction::EnvironmentWithFailover(_, fe) => {
let apps_result = match self._build_applications(fe, option) {
Ok(applications) => {
match self._push_applications(applications, option) {
Ok(results) => {
let applications = results
.into_iter()
.map(|(app, _)| app)
.collect::<Vec<_>>();
Ok(applications)
}
Err(err) => Err(err),
}
}
Err(err) => Err(err),
};
if apps_result.is_err() {
// should never be triggered because core always should ask for working failover environment
let commit_error = apps_result.err().unwrap();
error!(
"An error occurred on failover application {:?}",
commit_error
);
}
}
_ => {}
};
}
Step::DeployEnvironment(kubernetes, environment_action) => {
// deploy complete environment

View File

@@ -16,7 +16,7 @@ serde = "1.0"
serde_json = "1.0.57"
serde_derive = "1.0"
curl = "0.4.34"
reqwest = { version = "0.10.8", features = ["blocking"] }
# Digital Ocean Deps
digitalocean = "0.1.1"

View File

@@ -75,7 +75,7 @@ pub fn context() -> Context {
let lib_root_dir = std::env::var("LIB_ROOT_DIR").expect("LIB_ROOT_DIR is mandatory");
let metadata = Metadata {
test: Option::from(true),
dry_run_deploy: Option::from(true),
dry_run_deploy: Option::from(false),
};
Context::new(
@@ -713,3 +713,61 @@ pub fn non_working_environment(context: &Context) -> Environment {
environment
}
/// Builds an `Environment` fixture containing a single http-echo container
/// (forked from hashicorp). The `ECHO_TEXT` env var becomes the body served
/// at the application's root path.
pub fn echo_app_environment(context: &Context) -> Environment {
    let suffix = generate_id();
    // the application name is referenced both by the app and by its route
    let app_name = format!("echo-app-{}", suffix);

    let application = Application {
        id: generate_id(),
        name: app_name.clone(),
        git_url: "https://github.com/Qovery/engine-testing.git".to_string(),
        commit_id: "2205adea1db295547b99f7b17229afd7e879b6ff".to_string(),
        dockerfile_path: "Dockerfile".to_string(),
        action: Action::Create,
        git_credentials: GitCredentials {
            login: "x-access-token".to_string(),
            access_token: "CHANGE-ME/GITHUB_ACCESS_TOKEN".to_string(),
            expired_at: Utc::now(),
        },
        storage: vec![],
        environment_variables: vec![EnvironmentVariable {
            key: "ECHO_TEXT".to_string(),
            value: "42".to_string(),
        }],
        branch: "echo-app".to_string(),
        private_port: Some(5678),
        total_cpus: "100m".to_string(),
        total_ram_in_mib: 256,
        total_instances: 2,
        cpu_burst: "100m".to_string(),
        start_timeout_in_seconds: 60,
    };

    let router = Router {
        id: generate_id(),
        name: "main".to_string(),
        action: Action::Create,
        default_domain: generate_id() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN",
        public_port: 443,
        custom_domains: vec![],
        routes: vec![Route {
            path: "/".to_string(),
            application_name: app_name,
        }],
    };

    Environment {
        execution_id: context.execution_id().to_string(),
        id: generate_id(),
        kind: Kind::Development,
        owner_id: generate_id(),
        project_id: generate_id(),
        organization_id: ORGANIZATION_ID.to_string(),
        action: Action::Create,
        applications: vec![application],
        routers: vec![router],
        databases: vec![],
        external_services: vec![],
        clone_from_environment_id: None,
    }
}

View File

@@ -1,13 +1,15 @@
use curl::easy::Easy;
use curl::Error;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use crate::aws::{aws_access_key_id, aws_default_region, aws_secret_access_key, KUBE_CLUSTER_ID};
use curl::easy::Easy;
use digitalocean::error::Error::ReqwestError;
use qovery_engine::build_platform::local_docker::LocalDocker;
use qovery_engine::cloud_provider::aws::common;
use qovery_engine::cmd;
use qovery_engine::models::{Context, Environment};
use reqwest::StatusCode;
pub fn build_platform_local_docker(context: &Context) -> LocalDocker {
LocalDocker::new(context.clone(), "oxqlm3r99vwcmvuj", "qovery-local-docker")
@@ -103,3 +105,28 @@ pub fn is_pod_restarted(environment_check: Environment, podToCheck: &str) -> (bo
Err(e) => return (false, "".to_string()),
}
}
/// GET `path` (accepting invalid TLS certificates, since test endpoints often
/// serve self-signed or staging certs) and return whether the response body
/// exactly equals `should_return_str`.
///
/// # Errors
/// Transport-level failures (connection, TLS, body decoding) are propagated
/// as `Err`; any HTTP status other than 200 OK / 308 Permanent Redirect
/// yields `Ok(false)`.
pub fn curl_it_and_compare(path: &str, should_return_str: &str) -> Result<bool, reqwest::Error> {
    use reqwest::blocking::Client;

    let client = Client::builder()
        .danger_accept_invalid_certs(true)
        .build()
        .unwrap();

    let output = client.get(path).send()?;
    match output.status() {
        StatusCode::OK | StatusCode::PERMANENT_REDIRECT => {
            // propagate body-decoding failures instead of panicking on unwrap
            let returned = output.text()?;
            Ok(should_return_str == returned.as_str())
        }
        _ => Ok(false),
    }
}

716
tests/aws/aws_databases.rs Normal file
View File

@@ -0,0 +1,716 @@
extern crate test_utilities;
use chrono::Utc;
use rusoto_core::region::Region::Custom;
use self::test_utilities::cloudflare::dns_provider_cloudflare;
use self::test_utilities::utilities::generate_id;
use crate::aws::aws_environment::{delete_environment, deploy_environment};
use qovery_engine::cloud_provider::aws::common;
use qovery_engine::cloud_provider::service::Router;
use qovery_engine::cmd;
use qovery_engine::models::Kind::Production;
use qovery_engine::models::{
Action, Clone2, Context, CustomDomain, Database, DatabaseKind, Environment, EnvironmentAction,
EnvironmentVariable, ExternalService, GitCredentials, Kind, Storage, StorageType,
};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use test_utilities::aws::{aws_access_key_id, aws_default_region, aws_secret_access_key, context};
use test_utilities::utilities::{init, is_pod_restarted};
// to check overload between several databases and apps
#[test]
#[ignore]
fn deploy_an_environment_with_3_databases_and_3_apps() {
    // Deploy a "wide" environment (3 apps / 3 routers / 3 databases), then
    // delete it. Ignored by default: this is a slow cloud integration test.
    init();
    let context = context();
    // deletion must run under its own execution id
    let context_for_deletion = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::environment_3_apps_3_routers_3_databases(&context);
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    // initial deployment must fully succeed
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
    // TODO: should be uncommented as soon as cert-manager is fixed;
    // for the moment this assert reports an SSL issue on the second router.
    /* let connections = test_utilities::utilities::check_all_connections(&env_to_check);
    for con in connections {
        assert_eq!(con, true);
    }*/
    match delete_environment(&context_for_deletion, &ea_delete) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
}
/**
**
** PostgreSQL PART
**
**/
#[test]
#[ignore]
fn deploy_a_working_development_environment_with_all_options_and_psql() {
    // Deploy a Development-kind environment with 2 apps, 2 routers and one
    // PostgreSQL database, then delete it. Ignored: slow cloud integration test.
    init();
    let context = context();
    // deletion must run under its own execution id
    let context_for_deletion = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::environnement_2_app_2_routers_1_psql(&context);
    // kept for the commented-out connectivity check below
    let mut env_to_check = environment.clone();
    let mut environment_delete =
        test_utilities::aws::environnement_2_app_2_routers_1_psql(&context_for_deletion);
    environment.kind = Kind::Development;
    environment_delete.kind = Kind::Development;
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_for_deletion = EnvironmentAction::Environment(environment_delete);
    // initial deployment must fully succeed
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
    // TODO: should be uncommented as soon as cert-manager is fixed;
    // for the moment this assert reports an SSL issue on the second router.
    /* let connections = test_utilities::utilities::check_all_connections(&env_to_check);
    for con in connections {
        assert_eq!(con, true);
    }*/
    match delete_environment(&context_for_deletion, &ea_for_deletion) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
}
#[test]
fn deploy_a_working_environment_with_postgresql() {
    // Deploy a minimal environment backed by a containerized PostgreSQL
    // database, wire the app to it via PG_* env vars, then delete everything.
    init();
    let context = context();
    // deletion must run under its own execution id
    let context_for_delete = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    let database_host =
        "postgresql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
    let database_port = 5432;
    let database_db_name = "my-postgres".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Postgresql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "11.8.0".to_string(),
        fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port.clone(),
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "500m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // switch every app to the branch that talks to PostgreSQL and inject the
    // database connection settings as environment variables
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "postgres-app".to_string();
            app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "PG_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PORT".to_string(),
                    value: database_port.clone().to_string(),
                },
                EnvironmentVariable {
                    key: "PG_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "PG_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "postgres-app".to_string();
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    // initial deployment must fully succeed
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
    // todo: check the database disk is here and with correct size
    match delete_environment(&context_for_delete, &ea_delete) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        // deletion is best-effort in this test: an unrecoverable error is tolerated
        TransactionResult::UnrecoverableError(_, _) => assert!(true),
    };
}
#[test]
fn deploy_a_working_environment_and_redeploy_with_postgresql() {
    // Deploy an environment backed by PostgreSQL, redeploy it under a fresh
    // execution id, verify the database pod was NOT restarted by the redeploy,
    // then tear the environment down.
    init();
    let context = context();
    // redeploy and delete each need their own execution id
    let context_for_redeploy = context.clone_not_same_execution_id();
    let context_for_delete = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    let database_host =
        "postgresql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
    let database_port = 5432;
    let database_db_name = "my-postgres".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Postgresql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "11.8.0".to_string(),
        fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port.clone(),
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "500m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // switch every app to the branch that talks to PostgreSQL and inject the
    // database connection settings as environment variables
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "postgres-app".to_string();
            app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "PG_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PORT".to_string(),
                    value: database_port.clone().to_string(),
                },
                EnvironmentVariable {
                    key: "PG_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "PG_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "postgres-app".to_string();
    // `mut` removed: the redeploy clone is never modified
    let environment_to_redeploy = environment.clone();
    let environment_check = environment.clone();
    let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy);
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    // initial deployment must fully succeed
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
    // redeploying the identical environment must also succeed
    match deploy_environment(&context_for_redeploy, &ea_redeploy) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        TransactionResult::UnrecoverableError(_, _) => assert!(false),
    };
    // TO CHECK: the database pod must NOT be restarted by a redeploy.
    // The `(bool, _)` patterns below are exhaustive; the original code had an
    // unreachable `_ => {}` arm here which the compiler flags.
    let database_name = format!("{}-0", &environment_check.databases[0].name);
    match is_pod_restarted(environment_check, database_name.as_str()) {
        (true, _) => assert!(true),
        (false, _) => assert!(false),
    }
    match delete_environment(&context_for_delete, &ea_delete) {
        TransactionResult::Ok => assert!(true),
        TransactionResult::Rollback(_) => assert!(false),
        // deletion is best-effort in this test: an unrecoverable error is tolerated
        TransactionResult::UnrecoverableError(_, _) => assert!(true),
    };
}
/// Deploys a production-kind environment on AWS with a PostgreSQL 12.4 database
/// (production kind => provisioned as a managed RDS instance), then deletes it.
#[test]
#[ignore]
fn deploy_a_working_production_environment_with_postgresql() {
    init();
    let context = context();
    // deletion must run under a distinct execution id, hence a dedicated context
    let context_for_delete = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    environment.kind = Kind::Production;
    let database_host =
        "postgresql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
    let database_port = 5432;
    let database_db_name = "postgres".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Postgresql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "12.4".to_string(),
        fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port, // integer is Copy, no clone needed
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "100m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // point every application at the "postgres-app" test app and inject the
    // database connection settings through environment variables
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "postgres-app".to_string();
            app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "PG_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PORT".to_string(),
                    value: database_port.to_string(),
                },
                EnvironmentVariable {
                    key: "PG_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "PG_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "PG_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "postgres-app".to_string();
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deploy failed: transaction rolled back"),
        TransactionResult::UnrecoverableError(_, _) => panic!("deploy failed: unrecoverable error"),
    };
    // todo: check the database disk is here and with correct size
    // an unrecoverable error on deletion is tolerated: cleanup is best effort
    match delete_environment(&context_for_delete, &ea_delete) {
        TransactionResult::Ok | TransactionResult::UnrecoverableError(_, _) => {}
        TransactionResult::Rollback(_) => panic!("delete failed: transaction rolled back"),
    };
}
fn test_mongodb_configuration(context: Context, mut environment: Environment, version: &str) {
init();
let context_for_delete = context.clone_not_same_execution_id();
let database_host =
"mongodb-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
let database_port = 27017;
let database_db_name = "my-mongodb".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
let database_uri = format!(
"mongodb://{}:{}@{}:{}/{}",
database_username, database_password, database_host, database_port, database_db_name
);
// while waiting the info to be given directly in the database info, we're using this
let is_documentdb = match environment.kind {
Kind::Production => true,
Kind::Development => false,
};
environment.databases = vec![Database {
kind: DatabaseKind::Mongodb,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: version.to_string(),
fqdn_id: "mongodb-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "500m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t3.medium".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "mongodb-app".to_string();
app.commit_id = "158ea8ebc9897c50a7c56b910db33ce837ac1e61".to_string();
app.private_port = Some(1234);
app.dockerfile_path = format!("Dockerfile-{}", version);
app.environment_variables = vec![
// EnvironmentVariable {
// key: "ENABLE_DEBUG".to_string(),
// value: "true".to_string(),
// },
// EnvironmentVariable {
// key: "DEBUG_PAUSE".to_string(),
// value: "true".to_string(),
// },
EnvironmentVariable {
key: "IS_DOCUMENTDB".to_string(),
value: is_documentdb.to_string(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string(),
value: database_uri.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "MONGODB_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "mongodb-app".to_string();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the database disk is here and with correct size
match delete_environment(&context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
}
/**
**
** MongoDB PART
**
**/
/// test mongodb v3.6 with development environment
#[test]
fn deploy_a_working_environment_with_mongodb_v3_6() {
    let context = context();
    // no mutation happens here: the shared driver owns the environment
    let environment = test_utilities::aws::working_minimal_environment(&context);
    test_mongodb_configuration(context, environment, "3.6");
}
/// test mongodb v4.0 with development environment
#[test]
#[ignore]
fn deploy_a_working_environment_with_mongodb_v4_0() {
    let context = context();
    // no mutation happens here: the shared driver owns the environment
    let environment = test_utilities::aws::working_minimal_environment(&context);
    test_mongodb_configuration(context, environment, "4.0");
}
/// test mongodb v4.2 with development environment
#[test]
#[ignore]
fn deploy_a_working_environment_with_mongodb_v4_2() {
    let context = context();
    // no mutation happens here: the shared driver owns the environment
    let environment = test_utilities::aws::working_minimal_environment(&context);
    test_mongodb_configuration(context, environment, "4.2");
}
/// test mongodb v4.4 with development environment
#[test]
fn deploy_a_working_environment_with_mongodb_v4_4() {
    let context = context();
    // no mutation happens here: the shared driver owns the environment
    let environment = test_utilities::aws::working_minimal_environment(&context);
    test_mongodb_configuration(context, environment, "4.4");
}
/// test mongodb v3.6 with production environment (DocumentDB)
#[test]
#[ignore]
fn deploy_a_working_environment_with_production_mongodb_v3_6() {
    let ctx = context();
    // switch the environment to Production so the driver provisions DocumentDB
    let mut env = test_utilities::aws::working_minimal_environment(&ctx);
    env.kind = Kind::Production;
    test_mongodb_configuration(ctx, env, "3.6");
}
/**
**
** MySQL PART
**
**/
/// Deploys a development environment on AWS with a containerized MySQL 5.7.30
/// database, then deletes it.
#[test]
#[ignore]
fn deploy_a_working_environment_with_mysql() {
    init();
    let context = context();
    // deletion must run under a distinct execution id, hence a dedicated context
    let deletion_context = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    let database_host =
        "mysql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
    let database_port = 3306;
    let database_db_name = "mydb".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Mysql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "5.7.30".to_string(),
        fqdn_id: "mysql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port, // integer is Copy, no clone needed
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "500m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // point every application at the "mysql-app" test app and inject the
    // database connection settings through environment variables
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "mysql-app".to_string();
            app.commit_id = "222295112d58d78227c21060d3a707687302e86f".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "MYSQL_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PORT".to_string(),
                    value: database_port.to_string(),
                },
                EnvironmentVariable {
                    key: "MYSQL_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "mysql-app".to_string();
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deploy failed: transaction rolled back"),
        TransactionResult::UnrecoverableError(_, _) => panic!("deploy failed: unrecoverable error"),
    };
    // todo: check the database disk is here and with correct size
    match delete_environment(&deletion_context, &ea_delete) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("delete failed: transaction rolled back"),
        TransactionResult::UnrecoverableError(_, _) => panic!("delete failed: unrecoverable error"),
    };
    //Todo: remove the namespace (or project)
}
#[test]
#[ignore]
/// Tests the creation of a simple environment on AWS, with the DB provisioned on RDS.
fn deploy_a_working_production_environment_with_mysql() {
    init();
    let context = context();
    // deletion must run under a distinct execution id, hence a dedicated context
    let deletion_context = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    // qualified path for consistency with the sibling production tests
    environment.kind = Kind::Production;
    let database_host =
        "mysql-app-".to_string() + generate_id().as_str() + "-svc.CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
    let database_port = 3306;
    let database_db_name = "mysql".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Mysql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "5.7.30".to_string(),
        fqdn_id: "mysql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port, // integer is Copy, no clone needed
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "500m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // point every application at the "mysql-app" test app and inject the
    // database connection settings through environment variables
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "mysql-app".to_string();
            app.commit_id = "222295112d58d78227c21060d3a707687302e86f".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "MYSQL_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PORT".to_string(),
                    value: database_port.to_string(),
                },
                EnvironmentVariable {
                    key: "MYSQL_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "mysql-app".to_string();
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deploy failed: transaction rolled back"),
        TransactionResult::UnrecoverableError(_, _) => panic!("deploy failed: unrecoverable error"),
    };
    // todo: check the database disk is here and with correct size
    match delete_environment(&deletion_context, &ea_delete) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("delete failed: transaction rolled back"),
        TransactionResult::UnrecoverableError(_, _) => panic!("delete failed: unrecoverable error"),
    };
}

View File

@@ -15,13 +15,13 @@ use qovery_engine::models::{
};
use qovery_engine::transaction::{DeploymentOption, TransactionResult};
use test_utilities::aws::{aws_access_key_id, aws_default_region, aws_secret_access_key, context};
use test_utilities::utilities::{init, is_pod_restarted};
use test_utilities::utilities::{curl_it_and_compare, init, is_pod_restarted};
// insert how many actions you will use in tests
// args are function you want to use and how many context you want to have
// it permit you to create several different workspaces for each steps
// TODO implement it well
fn generate_contexts_and_environments(
pub fn generate_contexts_and_environments(
number: u8,
func: fn(&Context) -> Environment,
) -> (Vec<Context>, Vec<Environment>) {
@@ -36,7 +36,7 @@ fn generate_contexts_and_environments(
(context_vec, env_vec)
}
fn deploy_environment(
pub fn deploy_environment(
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult {
@@ -61,7 +61,7 @@ fn deploy_environment(
tx.commit()
}
fn pause_environment(
pub fn pause_environment(
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult {
@@ -79,7 +79,7 @@ fn pause_environment(
tx.commit()
}
fn delete_environment(
pub fn delete_environment(
context: &Context,
environment_action: &EnvironmentAction,
) -> TransactionResult {
@@ -188,40 +188,6 @@ fn deploy_a_not_working_environment_with_no_router_on_aws_eks() {
//Todo: remove the namespace (or project)
}
// to check overload between several databases and apps
#[test]
#[ignore]
fn deploy_an_environment_with_3_databases_and_3_apps() {
init();
let context = context();
let context_for_deletion = context.clone_not_same_execution_id();
let mut environment = test_utilities::aws::environment_3_apps_3_routers_3_databases(&context);
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TODO: should be uncommented as soon as cert-manager is fixed
// for the moment this assert report a SSL issue on the second router, so it's works well
/* let connections = test_utilities::utilities::check_all_connections(&env_to_check);
for con in connections {
assert_eq!(con, true);
}*/
match delete_environment(&context_for_deletion, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
}
#[test]
#[ignore]
fn deploy_a_working_environment_with_domain() {
@@ -370,191 +336,6 @@ fn deploy_a_working_environment_with_storage_on_aws_eks() {
//Todo: remove the namespace (or project)
}
#[test]
fn deploy_a_working_environment_with_postgresql() {
init();
let context = context();
let context_for_delete = context.clone_not_same_execution_id();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
let database_host =
"postgresql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
let database_port = 5432;
let database_db_name = "my-postgres".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
environment.databases = vec![Database {
kind: DatabaseKind::Postgresql,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: "11.8.0".to_string(),
fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "500m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t2.micro".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "postgres-app".to_string();
app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
app.private_port = Some(1234);
app.environment_variables = vec![
EnvironmentVariable {
key: "PG_HOST".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "PG_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "PG_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "PG_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "PG_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "postgres-app".to_string();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the database disk is here and with correct size
match delete_environment(&context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
}
#[test]
fn deploy_a_working_environment_and_redeploy_with_postgresql() {
init();
let context = context();
let context_for_redeploy = context.clone_not_same_execution_id();
let context_for_delete = context.clone_not_same_execution_id();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
let database_host = "postgresql-".to_string() + generate_id().as_str() + ".oom.sh"; // External access check
let database_port = 5432;
let database_db_name = "my-postgres".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
environment.databases = vec![Database {
kind: DatabaseKind::Postgresql,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: "11.8.0".to_string(),
fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "500m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t2.micro".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "postgres-app".to_string();
app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
app.private_port = Some(1234);
app.environment_variables = vec![
EnvironmentVariable {
key: "PG_HOST".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "PG_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "PG_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "PG_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "PG_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "postgres-app".to_string();
let mut environment_to_redeploy = environment.clone();
let environment_check = environment.clone();
let ea_redeploy = EnvironmentAction::Environment(environment_to_redeploy);
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
match deploy_environment(&context_for_redeploy, &ea_redeploy) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
let database_name = format!("{}-0", &environment_check.databases[0].name);
match is_pod_restarted(environment_check, database_name.as_str()) {
(true, _) => assert!(true),
(false, _) => assert!(false),
_ => {}
}
match delete_environment(&context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
}
// to check if app redeploy or not, it shouldn't
#[test]
fn redeploy_same_app_with_ebs() {
@@ -616,248 +397,6 @@ fn redeploy_same_app_with_ebs() {
};
}
#[test]
#[ignore]
fn deploy_a_working_production_environment_with_postgresql() {
init();
let context = context();
let context_for_delete = context.clone_not_same_execution_id();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
environment.kind = Kind::Production;
let database_host =
"postgresql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
let database_port = 5432;
let database_db_name = "postgres".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
environment.databases = vec![Database {
kind: DatabaseKind::Postgresql,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: "12.4".to_string(),
fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "100m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t2.micro".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "postgres-app".to_string();
app.commit_id = "5990752647af11ef21c3d46a51abbde3da1ab351".to_string();
app.private_port = Some(1234);
app.environment_variables = vec![
EnvironmentVariable {
key: "PG_HOST".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "PG_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "PG_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "PG_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "PG_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "postgres-app".to_string();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the database disk is here and with correct size
match delete_environment(&context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
}
fn test_mongodb_configuration(context: Context, mut environment: Environment, version: &str) {
init();
let context_for_delete = context.clone_not_same_execution_id();
let database_host =
"mongodb-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
let database_port = 27017;
let database_db_name = "my-mongodb".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
let database_uri = format!(
"mongodb://{}:{}@{}:{}/{}",
database_username, database_password, database_host, database_port, database_db_name
);
// while waiting the info to be given directly in the database info, we're using this
let is_documentdb = match environment.kind {
Kind::Production => true,
Kind::Development => false,
};
environment.databases = vec![Database {
kind: DatabaseKind::Mongodb,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: version.to_string(),
fqdn_id: "mongodb-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "500m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t3.medium".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "mongodb-app".to_string();
app.commit_id = "158ea8ebc9897c50a7c56b910db33ce837ac1e61".to_string();
app.private_port = Some(1234);
app.dockerfile_path = format!("Dockerfile-{}", version);
app.environment_variables = vec![
// EnvironmentVariable {
// key: "ENABLE_DEBUG".to_string(),
// value: "true".to_string(),
// },
// EnvironmentVariable {
// key: "DEBUG_PAUSE".to_string(),
// value: "true".to_string(),
// },
EnvironmentVariable {
key: "IS_DOCUMENTDB".to_string(),
value: is_documentdb.to_string(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_FQDN".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_MY_DDB_CONNECTION_URI".to_string(),
value: database_uri.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "MONGODB_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "QOVERY_DATABASE_TESTING_DATABASE_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "mongodb-app".to_string();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the database disk is here and with correct size
match delete_environment(&context_for_delete, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(true),
};
}
/// test mongodb v3.6 with development environment
#[test]
fn deploy_a_working_environment_with_mongodb_v3_6() {
let context = context();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
test_mongodb_configuration(context, environment, "3.6");
}
#[test]
#[ignore]
fn deploy_a_working_environment_with_mongodb_v4_0() {
let context = context();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
test_mongodb_configuration(context, environment, "4.0");
}
/// test mongodb v4.2 with development environment
#[test]
#[ignore]
fn deploy_a_working_environment_with_mongodb_v4_2() {
let context = context();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
test_mongodb_configuration(context, environment, "4.2");
}
/// test mongodb v4.4 with development environment
#[test]
fn deploy_a_working_environment_with_mongodb_v4_4() {
let context = context();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
test_mongodb_configuration(context, environment, "4.4");
}
/// test mongodb v3.6 with production environment (DocumentDB)
#[test]
#[ignore]
fn deploy_a_working_environment_with_production_mongodb_v3_6() {
let context = context();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
environment.kind = Kind::Production;
test_mongodb_configuration(context, environment, "3.6");
}
// #[test]
// fn deploy_a_working_environment_with_external_service() {
// init();
@@ -911,223 +450,6 @@ fn deploy_a_working_environment_with_production_mongodb_v3_6() {
// // TODO: remove the namespace (or project)
// }
#[test]
#[ignore]
fn deploy_a_working_environment_with_mysql() {
init();
let context = context();
let deletion_context = context.clone_not_same_execution_id();
let mut environment = test_utilities::aws::working_minimal_environment(&context);
let database_host =
"mysql-".to_string() + generate_id().as_str() + ".CHANGE-ME/DEFAULT_TEST_DOMAIN"; // External access check
let database_port = 3306;
let database_db_name = "mydb".to_string();
let database_username = "superuser".to_string();
let database_password = generate_id();
environment.databases = vec![Database {
kind: DatabaseKind::Mysql,
action: Action::Create,
id: generate_id(),
name: database_db_name.clone(),
version: "5.7.30".to_string(),
fqdn_id: "mysql-".to_string() + generate_id().as_str(),
fqdn: database_host.clone(),
port: database_port.clone(),
username: database_username.clone(),
password: database_password.clone(),
total_cpus: "500m".to_string(),
total_ram_in_mib: 512,
disk_size_in_gib: 10,
database_instance_type: "db.t2.micro".to_string(),
database_disk_type: "gp2".to_string(),
}];
environment.applications = environment
.applications
.into_iter()
.map(|mut app| {
app.branch = "mysql-app".to_string();
app.commit_id = "222295112d58d78227c21060d3a707687302e86f".to_string();
app.private_port = Some(1234);
app.environment_variables = vec![
EnvironmentVariable {
key: "MYSQL_HOST".to_string(),
value: database_host.clone(),
},
EnvironmentVariable {
key: "MYSQL_PORT".to_string(),
value: database_port.clone().to_string(),
},
EnvironmentVariable {
key: "MYSQL_DBNAME".to_string(),
value: database_db_name.clone(),
},
EnvironmentVariable {
key: "MYSQL_USERNAME".to_string(),
value: database_username.clone(),
},
EnvironmentVariable {
key: "MYSQL_PASSWORD".to_string(),
value: database_password.clone(),
},
];
app
})
.collect::<Vec<qovery_engine::models::Application>>();
environment.routers[0].routes[0].application_name = "mysql-app".to_string();
let mut environment_delete = environment.clone();
environment_delete.action = Action::Delete;
let ea = EnvironmentAction::Environment(environment);
let ea_delete = EnvironmentAction::Environment(environment_delete);
match deploy_environment(&context, &ea) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
// todo: check the database disk is here and with correct size
match delete_environment(&deletion_context, &ea_delete) {
TransactionResult::Ok => assert!(true),
TransactionResult::Rollback(_) => assert!(false),
TransactionResult::UnrecoverableError(_, _) => assert!(false),
};
//Todo: remove the namespace (or project)
}
#[test]
#[ignore]
/// Deploys a production environment on AWS where the MySQL database is
/// provisioned as a managed RDS instance (`kind = Production`), wires the
/// application to it through environment variables, then deletes the whole
/// environment with a separate execution context.
fn deploy_a_working_production_environment_with_mysql() {
    init();
    let context = context();
    let deletion_context = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::working_minimal_environment(&context);
    environment.kind = Production;
    // External access check: the database FQDN must resolve from outside the cluster.
    let database_host =
        "mysql-app-".to_string() + generate_id().as_str() + "-svc.CHANGE-ME/DEFAULT_TEST_DOMAIN";
    let database_port = 3306;
    let database_db_name = "mysql".to_string();
    let database_username = "superuser".to_string();
    let database_password = generate_id();
    environment.databases = vec![Database {
        kind: DatabaseKind::Mysql,
        action: Action::Create,
        id: generate_id(),
        name: database_db_name.clone(),
        version: "5.7.30".to_string(),
        fqdn_id: "mysql-".to_string() + generate_id().as_str(),
        fqdn: database_host.clone(),
        port: database_port, // Copy type: no clone needed
        username: database_username.clone(),
        password: database_password.clone(),
        total_cpus: "500m".to_string(),
        total_ram_in_mib: 512,
        disk_size_in_gib: 10,
        database_instance_type: "db.t2.micro".to_string(),
        database_disk_type: "gp2".to_string(),
    }];
    // Point every application at the MySQL test app and inject the
    // database connection settings as environment variables.
    environment.applications = environment
        .applications
        .into_iter()
        .map(|mut app| {
            app.branch = "mysql-app".to_string();
            app.commit_id = "222295112d58d78227c21060d3a707687302e86f".to_string();
            app.private_port = Some(1234);
            app.environment_variables = vec![
                EnvironmentVariable {
                    key: "MYSQL_HOST".to_string(),
                    value: database_host.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PORT".to_string(),
                    value: database_port.to_string(),
                },
                EnvironmentVariable {
                    key: "MYSQL_DBNAME".to_string(),
                    value: database_db_name.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_USERNAME".to_string(),
                    value: database_username.clone(),
                },
                EnvironmentVariable {
                    key: "MYSQL_PASSWORD".to_string(),
                    value: database_password.clone(),
                },
            ];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    environment.routers[0].routes[0].application_name = "mysql-app".to_string();
    let mut environment_delete = environment.clone();
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_delete = EnvironmentAction::Environment(environment_delete);
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deployment rolled back"),
        TransactionResult::UnrecoverableError(_, _) => {
            panic!("deployment failed with an unrecoverable error")
        }
    };
    // todo: check the database disk is here and with correct size
    match delete_environment(&deletion_context, &ea_delete) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deletion rolled back"),
        TransactionResult::UnrecoverableError(_, _) => {
            panic!("deletion failed with an unrecoverable error")
        }
    };
}
#[test]
#[ignore]
/// Deploys a development environment with two applications, two routers and
/// one PostgreSQL database on AWS, then deletes it using a separate context.
fn deploy_a_working_development_environment_with_all_options_and_psql() {
    init();
    let context = context();
    let context_for_deletion = context.clone_not_same_execution_id();
    let mut environment = test_utilities::aws::environnement_2_app_2_routers_1_psql(&context);
    // Kept for the (currently disabled) connectivity check below;
    // underscore-prefixed to silence the unused-variable warning.
    let _env_to_check = environment.clone();
    let mut environment_delete =
        test_utilities::aws::environnement_2_app_2_routers_1_psql(&context_for_deletion);
    environment.kind = Kind::Development;
    environment_delete.kind = Kind::Development;
    environment_delete.action = Action::Delete;
    let ea = EnvironmentAction::Environment(environment);
    let ea_for_deletion = EnvironmentAction::Environment(environment_delete);
    match deploy_environment(&context, &ea) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deployment rolled back"),
        TransactionResult::UnrecoverableError(_, _) => {
            panic!("deployment failed with an unrecoverable error")
        }
    };
    // TODO: re-enable as soon as cert-manager is fixed.
    // At the moment this check reports an SSL issue on the second router,
    // which means the routing itself works as expected.
    /* let connections = test_utilities::utilities::check_all_connections(&_env_to_check);
    for con in connections {
        assert_eq!(con, true);
    }*/
    match delete_environment(&context_for_deletion, &ea_for_deletion) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deletion rolled back"),
        TransactionResult::UnrecoverableError(_, _) => {
            panic!("deletion failed with an unrecoverable error")
        }
    };
}
/*#[test]
#[ignore]
fn deploy_a_working_production_environment_with_all_options_on_aws_eks() {
@@ -1340,6 +662,67 @@ fn deploy_a_non_working_environment_with_a_working_failover_on_aws_eks() {
};
}
#[test]
#[ignore]
/// Deploys two intentionally broken environments, each paired with a working
/// failover environment: both deployments must end in an unrecoverable error
/// (triggering the failover), after which the failover environment is deleted.
fn deploy_2_non_working_environments_with_2_working_failovers_on_aws_eks() {
    init();
    // Contexts for the non-working environments and their failovers.
    let context_failover_1 = context();
    let context_failover_2 = context_failover_1.clone_not_same_execution_id();
    let context_first_fail_deployment_1 = context_failover_1.clone_not_same_execution_id();
    let context_second_fail_deployment_2 = context_failover_1.clone_not_same_execution_id();
    let failover_environment_1 = test_utilities::aws::echo_app_environment(&context_failover_1);
    let fail_app_1 =
        test_utilities::aws::non_working_environment(&context_first_fail_deployment_1);
    let mut failover_environment_2 = test_utilities::aws::echo_app_environment(&context_failover_2);
    let fail_app_2 =
        test_utilities::aws::non_working_environment(&context_second_fail_deployment_2);
    // Give the second failover a distinct echo payload so the two can be told apart.
    failover_environment_2.applications = failover_environment_2
        .applications
        .into_iter()
        .map(|mut app| {
            app.environment_variables = vec![EnvironmentVariable {
                key: "ECHO_TEXT".to_string(),
                value: "Lilou".to_string(),
            }];
            app
        })
        .collect::<Vec<qovery_engine::models::Application>>();
    // Context for the final cleanup.
    let context_deletion = context_failover_1.clone_not_same_execution_id();
    let mut delete_env = test_utilities::aws::echo_app_environment(&context_deletion);
    delete_env.action = Action::Delete;
    let ea_delete = EnvironmentAction::Environment(delete_env);
    // Kept for a future post-deployment check; underscore-prefixed as it is
    // currently unused.
    let _env_to_check = failover_environment_1.clone();
    // First deployment: both runs are expected to fail over.
    let ea1 = EnvironmentAction::EnvironmentWithFailover(fail_app_1, failover_environment_1);
    let ea2 = EnvironmentAction::EnvironmentWithFailover(fail_app_2, failover_environment_2);
    match deploy_environment(&context_failover_1, &ea1) {
        TransactionResult::Ok => panic!("deployment of a non-working environment succeeded"),
        TransactionResult::Rollback(_) => panic!("deployment rolled back instead of failing over"),
        TransactionResult::UnrecoverableError(_, _) => {}
    };
    match deploy_environment(&context_failover_2, &ea2) {
        TransactionResult::Ok => panic!("deployment of a non-working environment succeeded"),
        TransactionResult::Rollback(_) => panic!("deployment rolled back instead of failing over"),
        TransactionResult::UnrecoverableError(_, _) => {}
    };
    match delete_environment(&context_deletion, &ea_delete) {
        TransactionResult::Ok => {}
        TransactionResult::Rollback(_) => panic!("deletion rolled back"),
        TransactionResult::UnrecoverableError(_, _) => {
            panic!("deletion failed with an unrecoverable error")
        }
    };
}
#[test]
#[ignore]
fn deploy_a_non_working_environment_with_a_non_working_failover_on_aws_eks() {

View File

@@ -1,3 +1,4 @@
mod aws_databases;
mod aws_environment;
mod aws_kubernetes;
mod deletion;