mirror of
https://github.com/jlengrand/engine.git
synced 2026-03-10 08:11:21 +00:00
Merge branch 'dev' into feat/ENG_1085_switch_db_access
This commit is contained in:
4
.github/workflows/tests.yml
vendored
4
.github/workflows/tests.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Run selected functional tests
|
||||
timeout-minutes: 120
|
||||
timeout-minutes: 180
|
||||
env:
|
||||
GITLAB_PROJECT_ID: ${{ secrets.GITLAB_PROJECT_ID }}
|
||||
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
|
||||
@@ -68,7 +68,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Only validate PR if all tests have been requested
|
||||
timeout-minutes: 120
|
||||
timeout-minutes: 180
|
||||
env:
|
||||
GITLAB_PROJECT_ID: ${{ secrets.GITLAB_PROJECT_ID }}
|
||||
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
|
||||
|
||||
@@ -18,13 +18,15 @@ deploymentStrategy:
|
||||
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
||||
enabled: false
|
||||
storageClassName: aws-ebs-gp2-0
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 1Gi
|
||||
finalizers:
|
||||
- kubernetes.io/pvc-protection
|
||||
inMemory:
|
||||
enabled: true
|
||||
|
||||
adminUser: "{{ grafana_admin_user }}"
|
||||
adminPassword: "{{ grafana_admin_password }}"
|
||||
|
||||
8
lib/aws/bootstrap/chart_values/metrics-server.yaml
Normal file
8
lib/aws/bootstrap/chart_values/metrics-server.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
extraArgs:
|
||||
kubelet-preferred-address-types: InternalIP,ExternalIP,Hostname
|
||||
kubelet-use-node-status-port: true
|
||||
metric-resolution: 15s
|
||||
cert-dir: /tmp
|
||||
|
||||
apiService:
|
||||
create: true
|
||||
@@ -148,6 +148,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: {{ private_port }}
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -501,6 +501,7 @@ service:
|
||||
## MySQL Service type
|
||||
##
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
name: {{ service_name }}
|
||||
|
||||
## MySQL Service port
|
||||
##
|
||||
|
||||
@@ -251,6 +251,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: 5432
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -370,6 +370,7 @@ master:
|
||||
## Redis Master Service type
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
port: 6379
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{%- if is_private_port %}
|
||||
{%- if (ports is defined) and ports %}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -23,4 +23,4 @@ spec:
|
||||
appId: {{ id }}
|
||||
app: {{ sanitized_name }}
|
||||
envId: {{ environment_id }}
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
|
||||
@@ -50,6 +50,11 @@ resource "helm_release" "documentdb_instance_external_name" {
|
||||
value = "{{database_id}}"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "service_name"
|
||||
value = "{{service_name}}"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
aws_docdb_cluster.documentdb_cluster
|
||||
]
|
||||
|
||||
@@ -51,6 +51,10 @@ resource "helm_release" "mysql_instance_external_name" {
|
||||
name = "app_id"
|
||||
value = "{{database_id}}"
|
||||
}
|
||||
set {
|
||||
name = "service_name"
|
||||
value = "{{service_name}}"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
aws_db_instance.mysql_instance
|
||||
|
||||
@@ -51,6 +51,10 @@ resource "helm_release" "postgres_instance_external_name" {
|
||||
name = "app_id"
|
||||
value = "{{database_id}}"
|
||||
}
|
||||
set {
|
||||
name = "service_name"
|
||||
value = "{{service_name}}"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
aws_db_instance.postgresql_instance
|
||||
|
||||
@@ -50,6 +50,11 @@ resource "helm_release" "elasticache_instance_external_name" {
|
||||
value = "{{database_id}}"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "service_name"
|
||||
value = "{{service_name}}"
|
||||
}
|
||||
|
||||
set {
|
||||
name= "publicly_accessible"
|
||||
value= {{ publicly_accessible }}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
dependencies:
|
||||
- name: common
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
version: 1.7.1
|
||||
digest: sha256:d05549cd2eb5b99a49655221b8efd09927cc48daca3fa9f19af0257a11e5260f
|
||||
generated: "2021-07-31T15:03:06.939238802Z"
|
||||
version: 1.10.3
|
||||
digest: sha256:710e8247ae70ea63a2fb2fde4320511ff28c7b5c7a738861427f104a7718bdf4
|
||||
generated: "2021-12-16T23:00:17.006701838Z"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
annotations:
|
||||
category: Analytics
|
||||
apiVersion: v2
|
||||
appVersion: 0.5.0
|
||||
appVersion: 0.5.2
|
||||
dependencies:
|
||||
- name: common
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
@@ -23,4 +23,4 @@ name: metrics-server
|
||||
sources:
|
||||
- https://github.com/bitnami/bitnami-docker-metrics-server
|
||||
- https://github.com/kubernetes-incubator/metrics-server
|
||||
version: 5.9.2
|
||||
version: 5.10.13
|
||||
|
||||
@@ -11,7 +11,7 @@ $ helm install my-release bitnami/metrics-server
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps a [Metrics Server](https://github.com/bitnami/bitnami-docker-metrics-server) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
This chart bootstraps a [Metrics Server](https://github.com/bitnami/bitnami-docker-metrics-server) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
|
||||
|
||||
@@ -55,10 +55,13 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------ | -------------------------------------------------------------------------------------------- | ----- |
|
||||
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
|
||||
| Name | Description | Value |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------- | ----- |
|
||||
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
|
||||
| `commonLabels` | Add labels to all the deployed resources | `{}` |
|
||||
| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
|
||||
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
|
||||
|
||||
|
||||
### Metrics Server parameters
|
||||
@@ -67,7 +70,7 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------ |
|
||||
| `image.registry` | Metrics Server image registry | `docker.io` |
|
||||
| `image.repository` | Metrics Server image repository | `bitnami/metrics-server` |
|
||||
| `image.tag` | Metrics Server image tag (immutable tags are recommended) | `0.5.0-debian-10-r32` |
|
||||
| `image.tag` | Metrics Server image tag (immutable tags are recommended) | `0.5.2-debian-10-r0` |
|
||||
| `image.pullPolicy` | Metrics Server image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Metrics Server image pull secrets | `[]` |
|
||||
| `hostAliases` | Add deployment host aliases | `[]` |
|
||||
@@ -78,9 +81,12 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
| `serviceAccount.name` | The name of the ServiceAccount to create | `""` |
|
||||
| `serviceAccount.automountServiceAccountToken` | Automount API credentials for a service account | `true` |
|
||||
| `apiService.create` | Specifies whether the v1beta1.metrics.k8s.io API service should be created. You can check if it is needed with `kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"`. | `false` |
|
||||
| `apiService.insecureSkipTLSVerify` | Specifies whether to skip self-verifying self-signed TLS certificates. Set to "false" if you are providing your own certificates. | `true` |
|
||||
| `apiService.caBundle` | A base64-encoded string of concatenated certificates for the CA chain for the APIService. | `""` |
|
||||
| `securePort` | Port where metrics-server will be running | `8443` |
|
||||
| `hostNetwork` | Enable hostNetwork mode | `false` |
|
||||
| `command` | Override default container command (useful when using custom images) | `[]` |
|
||||
| `dnsPolicy` | Default dnsPolicy setting | `ClusterFirst` |
|
||||
| `command` | Override default container command (useful when using custom images) | `["metrics-server"]` |
|
||||
| `extraArgs` | Extra arguments to pass to metrics-server on start up | `{}` |
|
||||
| `podLabels` | Pod labels | `{}` |
|
||||
| `podAnnotations` | Pod annotations | `{}` |
|
||||
@@ -147,21 +153,25 @@ $ helm install my-release -f values.yaml bitnami/metrics-server
|
||||
|
||||
## Configuration and installation details
|
||||
|
||||
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
|
||||
### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
|
||||
|
||||
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
|
||||
|
||||
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
|
||||
|
||||
### Enable security for Metrics Server. Configuring RBAC
|
||||
### Enable RBAC security
|
||||
|
||||
In order to enable Role-based access control for Metrics Servier you can use the following parameter: `rbac.create=true`
|
||||
In order to enable Role-Based Access Control (RBAC) for Metrics Server, use the following parameter: `rbac.create=true`.
|
||||
|
||||
### Setting Pod's affinity
|
||||
### Configure certificates
|
||||
|
||||
This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
|
||||
If you are providing your own certificates for the API Service, set `insecureSkipTLSVerify` to `"false"`, and provide a `caBundle` consisting of the base64-encoded certificate chain.
|
||||
|
||||
As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
|
||||
### Set Pod affinity
|
||||
|
||||
This chart allows you to set custom Pod affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
|
||||
|
||||
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
@@ -175,24 +185,9 @@ This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs
|
||||
|
||||
### To 5.0.0
|
||||
|
||||
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
|
||||
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
|
||||
|
||||
**What changes were introduced in this major version?**
|
||||
|
||||
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
|
||||
- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
|
||||
|
||||
**Considerations when upgrading to this version**
|
||||
|
||||
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
|
||||
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
|
||||
- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
|
||||
|
||||
**Useful links**
|
||||
|
||||
- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
|
||||
- https://helm.sh/docs/topics/v2_v3_migration/
|
||||
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
|
||||
[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/metrics-server/administration/upgrade-helm3/).
|
||||
|
||||
### To 4.0.0
|
||||
|
||||
@@ -212,3 +207,19 @@ Use the workaround below to upgrade from versions previous to 2.0.0. The followi
|
||||
```console
|
||||
$ kubectl patch deployment metrics-server --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Copyright © 2022 Bitnami
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
annotations:
|
||||
category: Infrastructure
|
||||
apiVersion: v2
|
||||
appVersion: 1.7.1
|
||||
appVersion: 1.10.0
|
||||
description: A Library Helm Chart for grouping common logic between bitnami charts.
|
||||
This chart is not deployable by itself.
|
||||
home: https://github.com/bitnami/charts/tree/master/bitnami/common
|
||||
@@ -18,6 +18,6 @@ maintainers:
|
||||
name: common
|
||||
sources:
|
||||
- https://github.com/bitnami/charts
|
||||
- http://www.bitnami.com/
|
||||
- https://www.bitnami.com/
|
||||
type: library
|
||||
version: 1.7.1
|
||||
version: 1.10.3
|
||||
|
||||
@@ -50,16 +50,18 @@ The following table lists the helpers available in the library which are scoped
|
||||
|
||||
### Capabilities
|
||||
|
||||
| Helper identifier | Description | Expected Input |
|
||||
|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------|
|
||||
| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
|
||||
| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
|
||||
| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
|
||||
| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
|
||||
| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
|
||||
| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
|
||||
| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for policy | `.` Chart context |
|
||||
| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
|
||||
| Helper identifier | Description | Expected Input |
|
||||
|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------|
|
||||
| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
|
||||
| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
|
||||
| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
|
||||
| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
|
||||
| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
|
||||
| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
|
||||
| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
|
||||
| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context |
|
||||
| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
|
||||
| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
|
||||
|
||||
### Errors
|
||||
|
||||
@@ -92,7 +94,7 @@ The following table lists the helpers available in the library which are scoped
|
||||
|
||||
### Names
|
||||
|
||||
| Helper identifier | Description | Expected Inpput |
|
||||
| Helper identifier | Description | Expected Input |
|
||||
|-------------------------|------------------------------------------------------------|-------------------|
|
||||
| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
|
||||
| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
|
||||
@@ -111,7 +113,7 @@ The following table lists the helpers available in the library which are scoped
|
||||
|
||||
| Helper identifier | Description | Expected Input |
|
||||
|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
|
||||
| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
|
||||
| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
|
||||
|
||||
### TplValues
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ Return the target Kubernetes version
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for policy.
|
||||
Return the appropriate apiVersion for poddisruptionbudget.
|
||||
*/}}
|
||||
{{- define "common.capabilities.policy.apiVersion" -}}
|
||||
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
|
||||
@@ -26,6 +26,28 @@ Return the appropriate apiVersion for policy.
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for networkpolicy.
|
||||
*/}}
|
||||
{{- define "common.capabilities.networkPolicy.apiVersion" -}}
|
||||
{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
|
||||
{{- print "extensions/v1beta1" -}}
|
||||
{{- else -}}
|
||||
{{- print "networking.k8s.io/v1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for cronjob.
|
||||
*/}}
|
||||
{{- define "common.capabilities.cronjob.apiVersion" -}}
|
||||
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
|
||||
{{- print "batch/v1beta1" -}}
|
||||
{{- else -}}
|
||||
{{- print "batch/v1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for deployment.
|
||||
*/}}
|
||||
|
||||
@@ -30,3 +30,23 @@ If release name contains chart name it will be used as a full name.
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified dependency name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
Usage:
|
||||
{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
|
||||
*/}}
|
||||
{{- define "common.names.dependency.fullname" -}}
|
||||
{{- if .chartValues.fullnameOverride -}}
|
||||
{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .chartName .chartValues.nameOverride -}}
|
||||
{{- if contains $name .context.Release.Name -}}
|
||||
{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
@@ -14,7 +14,7 @@ Params:
|
||||
{{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
|
||||
{{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
|
||||
|
||||
{{- if and (not $existingSecret) (eq $enabled "true") -}}
|
||||
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
|
||||
{{- $requiredPasswords := list -}}
|
||||
|
||||
{{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
|
||||
|
||||
@@ -18,7 +18,7 @@ Params:
|
||||
{{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
|
||||
{{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
|
||||
|
||||
{{- if and (not $existingSecret) (eq $enabled "true") -}}
|
||||
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
|
||||
{{- $requiredPasswords := list -}}
|
||||
|
||||
{{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
|
||||
|
||||
@@ -22,7 +22,7 @@ Params:
|
||||
|
||||
{{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
|
||||
|
||||
{{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}}
|
||||
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
|
||||
{{- $requiredPasswords := list -}}
|
||||
|
||||
{{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
|
||||
|
||||
@@ -13,10 +13,8 @@ Params:
|
||||
{{- $enabled := include "common.postgresql.values.enabled" . -}}
|
||||
{{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
|
||||
{{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
|
||||
|
||||
{{- if and (not $existingSecret) (eq $enabled "true") -}}
|
||||
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
|
||||
{{- $requiredPasswords := list -}}
|
||||
|
||||
{{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
|
||||
{{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ Params:
|
||||
{{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
|
||||
{{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
|
||||
|
||||
{{- if and (not $existingSecretValue) (eq $enabled "true") -}}
|
||||
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
|
||||
{{- $requiredPasswords := list -}}
|
||||
|
||||
{{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
|
||||
|
||||
@@ -5,6 +5,12 @@ metadata:
|
||||
name: {{ include "common.names.fullname" . }}-auth-delegator
|
||||
namespace: kube-system
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -4,6 +4,12 @@ kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "common.names.fullname" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
||||
@@ -3,6 +3,12 @@ kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "common.names.fullname" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
selector:
|
||||
@@ -51,6 +57,7 @@ spec:
|
||||
{{- if .Values.hostNetwork }}
|
||||
hostNetwork: true
|
||||
{{- end }}
|
||||
dnsPolicy: {{ .Values.dnsPolicy }}
|
||||
containers:
|
||||
{{- if .Values.extraContainers }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.extraContainers "context" $) | nindent 8 }}
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
{{- range .Values.extraDeploy }}
|
||||
---
|
||||
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
|
||||
{{- end }}
|
||||
@@ -8,13 +8,22 @@ kind: APIService
|
||||
metadata:
|
||||
name: v1beta1.metrics.k8s.io
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
service:
|
||||
name: {{ template "common.names.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
group: metrics.k8s.io
|
||||
version: v1beta1
|
||||
insecureSkipTLSVerify: true
|
||||
insecureSkipTLSVerify: {{ .Values.apiService.insecureSkipTLSVerify | default true }}
|
||||
{{- if .Values.apiService.caBundle }}
|
||||
caBundle: {{ .Values.apiService.caBundle }}
|
||||
{{- end }}
|
||||
groupPriorityMinimum: 100
|
||||
versionPriority: 100
|
||||
{{- end -}}
|
||||
|
||||
@@ -4,6 +4,12 @@ kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "common.names.fullname" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -4,6 +4,12 @@ kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ include "common.names.fullname" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.podDisruptionBudget.minAvailable }}
|
||||
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
|
||||
@@ -13,4 +19,4 @@ spec:
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
@@ -5,6 +5,12 @@ metadata:
|
||||
name: {{ printf "%s-auth-reader" (include "common.names.fullname" .) }}
|
||||
namespace: kube-system
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
|
||||
@@ -4,5 +4,11 @@ kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "common.names.fullname" . }}
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -2,12 +2,22 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "common.names.fullname" . }}
|
||||
|
||||
labels: {{- include "common.labels.standard" . | nindent 4 }}
|
||||
{{- if .Values.service.labels }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.service.annotations .Values.commonAnnotations }}
|
||||
annotations:
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }}
|
||||
{{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
|
||||
@@ -22,6 +22,15 @@ nameOverride: ""
|
||||
## @param fullnameOverride String to fully override common.names.fullname template
|
||||
##
|
||||
fullnameOverride: ""
|
||||
## @param commonLabels Add labels to all the deployed resources
|
||||
##
|
||||
commonLabels: {}
|
||||
## @param commonAnnotations Add annotations to all the deployed resources
|
||||
##
|
||||
commonAnnotations: {}
|
||||
## @param extraDeploy Array of extra objects to deploy with the release
|
||||
##
|
||||
extraDeploy: []
|
||||
|
||||
## @section Metrics Server parameters
|
||||
|
||||
@@ -36,10 +45,10 @@ fullnameOverride: ""
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/metrics-server
|
||||
tag: 0.5.0-debian-10-r59
|
||||
tag: 0.5.2-debian-10-r49
|
||||
## Specify a imagePullPolicy
|
||||
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
|
||||
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
|
||||
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
|
||||
##
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
@@ -98,6 +107,11 @@ apiService:
|
||||
## This is still necessary up to at least k8s version >= 1.21, but depends on vendors and cloud providers.
|
||||
##
|
||||
create: false
|
||||
## @param apiService.insecureSkipTLSVerify Specifies whether to skip self-verifying self-signed TLS certificates. Set to "false" if you are providing your own certificates.
|
||||
## Note that "false" MUST be in quotation marks (cf. https://github.com/helm/helm/issues/3308), since false without quotation marks will render to true
|
||||
insecureSkipTLSVerify: true
|
||||
## @param apiService.caBundle A base64-encoded string of concatenated certificates for the CA chain for the APIService.
|
||||
caBundle: ""
|
||||
## @param securePort Port where metrics-server will be running
|
||||
##
|
||||
securePort: 8443
|
||||
@@ -107,6 +121,10 @@ securePort: 8443
|
||||
## if you use Weave network on EKS
|
||||
##
|
||||
hostNetwork: false
|
||||
## @param dnsPolicy Default dnsPolicy setting
|
||||
## If you enable hostNetwork then you may need to set your dnsPolicy to something other
|
||||
## than "ClusterFirst" depending on your requirements.
|
||||
dnsPolicy: "ClusterFirst"
|
||||
## @param command Override default container command (useful when using custom images)
|
||||
##
|
||||
command: ["metrics-server"]
|
||||
@@ -215,7 +233,7 @@ service:
|
||||
##
|
||||
labels: {}
|
||||
## Metric Server containers' resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
|
||||
## We usually recommend not to specify default resources and to leave this as a conscious
|
||||
## choice for the user. This also increases chances charts run on environments with little
|
||||
## resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
|
||||
@@ -19,5 +19,7 @@ rules:
|
||||
resources:
|
||||
- pods/exec
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
{{- end }}
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: v1
|
||||
metadata:
|
||||
# This service is used for deployed services that do not have ingress like database
|
||||
# It is used to provide to the end user an endpoint with desired name
|
||||
name: {{.Values.app_id}}-dns
|
||||
name: {{ .Values.service_name }}
|
||||
labels:
|
||||
appId: {{.Values.app_id}}
|
||||
{{ if .Values.publicly_accessible }}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
target_hostname: ""
|
||||
source_fqdn: ""
|
||||
service_name: ""
|
||||
app_id: ""
|
||||
publicly_accessible: {{ publicly_accessible }}
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "mysql.fullname" . }}
|
||||
name: {{ .Values.service.name }}
|
||||
labels: {{- include "mysql.labels" . | nindent 4 }}
|
||||
component: master
|
||||
{{- if or .Values.service.annotations .Values.metrics.service.annotations }}
|
||||
|
||||
@@ -490,6 +490,7 @@ service:
|
||||
##
|
||||
type: ClusterIP
|
||||
|
||||
#name: {{ service_name }}
|
||||
## MySQL Service port
|
||||
##
|
||||
port: 3306
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "postgresql.fullname" . }}
|
||||
name: {{ .Values.service.name }}
|
||||
labels:
|
||||
app: {{ template "postgresql.name" . }}
|
||||
chart: {{ template "postgresql.chart" . }}
|
||||
|
||||
@@ -249,6 +249,7 @@ ldap:
|
||||
service:
|
||||
## PosgresSQL service type
|
||||
type: ClusterIP
|
||||
name: service_name
|
||||
# clusterIP: None
|
||||
port: 5432
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "redis.fullname" . }}-master
|
||||
name: {{ .Values.master.service.name }}
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
labels:
|
||||
app: {{ template "redis.name" . }}
|
||||
|
||||
@@ -362,6 +362,7 @@ master:
|
||||
service:
|
||||
## Redis Master Service type
|
||||
type: ClusterIP
|
||||
name: service_name
|
||||
port: 6379
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
|
||||
@@ -3,13 +3,15 @@ deploymentStrategy:
|
||||
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
||||
enabled: false
|
||||
storageClassName: do-volume-standard-0
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 1Gi
|
||||
finalizers:
|
||||
- kubernetes.io/pvc-protection
|
||||
inMemory:
|
||||
enabled: true
|
||||
|
||||
adminUser: "{{ grafana_admin_user }}"
|
||||
adminPassword: "{{ grafana_admin_password }}"
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
extraArgs:
|
||||
kubelet-preferred-address-types: InternalIP,ExternalIP,Hostname
|
||||
kubelet-use-node-status-port: true
|
||||
metric-resolution: 15s
|
||||
cert-dir: /tmp
|
||||
|
||||
apiService:
|
||||
create: true
|
||||
@@ -151,6 +151,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: {{ private_port }}
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -501,6 +501,7 @@ service:
|
||||
## MySQL Service type
|
||||
##
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
name: {{ service_name }}
|
||||
|
||||
## MySQL Service port
|
||||
##
|
||||
|
||||
@@ -251,6 +251,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: 5432
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -370,6 +370,7 @@ master:
|
||||
## Redis Master Service type
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
port: 6379
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{%- if is_private_port %}
|
||||
{%- if (ports is defined) and ports %}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -23,4 +23,4 @@ spec:
|
||||
appId: {{ id }}
|
||||
app: {{ sanitized_name }}
|
||||
envId: {{ environment_id }}
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
|
||||
@@ -8,6 +8,7 @@ charts:
|
||||
- name: grafana
|
||||
version: 6.16.10
|
||||
repo_name: grafana
|
||||
no_sync: true
|
||||
comment: |
|
||||
Dashboard have been set into it as the chart suggest but it's a problem with helm-freeze
|
||||
Need to look if we can move them out of this repo
|
||||
@@ -34,7 +35,7 @@ charts:
|
||||
version: 9.10.4
|
||||
- name: metrics-server
|
||||
repo_name: bitnami
|
||||
version: 5.9.2
|
||||
version: 5.10.13
|
||||
- name: kube-state-metrics
|
||||
repo_name: prometheus-community
|
||||
version: 3.4.2
|
||||
|
||||
@@ -3,13 +3,15 @@ deploymentStrategy:
|
||||
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
||||
enabled: false
|
||||
storageClassName: scw-sbv-ssd-0
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 1Gi
|
||||
finalizers:
|
||||
- kubernetes.io/pvc-protection
|
||||
inMemory:
|
||||
enabled: true
|
||||
|
||||
adminUser: "{{ grafana_admin_user }}"
|
||||
adminPassword: "{{ grafana_admin_password }}"
|
||||
|
||||
@@ -89,6 +89,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: {{ private_port }}
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -480,6 +480,7 @@ service:
|
||||
##
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
|
||||
name: {{ service_name }}
|
||||
## MySQL Service port
|
||||
##
|
||||
port: 3306
|
||||
|
||||
@@ -123,6 +123,7 @@ service:
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
# clusterIP: None
|
||||
port: 5432
|
||||
name: {{ service_name }}
|
||||
|
||||
## Provide any additional annotations which may be required. Evaluated as a template.
|
||||
##
|
||||
|
||||
@@ -327,6 +327,7 @@ master:
|
||||
## Redis Master Service type
|
||||
type: {% if publicly_accessible -%} LoadBalancer {% else -%} ClusterIP {% endif %}
|
||||
port: 6379
|
||||
name: {{ service_name }}
|
||||
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{%- if is_private_port %}
|
||||
{%- if (ports is defined) and ports %}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -23,4 +23,4 @@ spec:
|
||||
appId: {{ id }}
|
||||
app: {{ sanitized_name }}
|
||||
envId: {{ environment_id }}
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
@@ -1,5 +1,4 @@
|
||||
use std::collections::HashSet;
|
||||
use std::iter::FromIterator;
|
||||
use std::str::Utf8Error;
|
||||
|
||||
/// Extract ARG value from a Dockerfile content
|
||||
@@ -34,6 +33,8 @@ pub fn extract_dockerfile_args(dockerfile_content: Vec<u8>) -> Result<HashSet<St
|
||||
}
|
||||
|
||||
/// Return env var args that are really used in the Dockerfile
|
||||
/// env_var_args is a vector of value "key=value".
|
||||
/// which is the format of the value expected by docker with the argument "build-arg"
|
||||
pub fn match_used_env_var_args(
|
||||
env_var_args: Vec<String>,
|
||||
dockerfile_content: Vec<u8>,
|
||||
@@ -42,9 +43,22 @@ pub fn match_used_env_var_args(
|
||||
let used_args = extract_dockerfile_args(dockerfile_content)?;
|
||||
|
||||
// match env var args and dockerfile env vargs
|
||||
Ok(HashSet::from_iter(env_var_args)
|
||||
let env_var_arg_keys = env_var_args
|
||||
.iter()
|
||||
.map(|env_var| env_var.split("=").next().unwrap_or(&"").to_string())
|
||||
.collect::<HashSet<String>>();
|
||||
|
||||
let matched_env_args_keys = env_var_arg_keys
|
||||
.intersection(&used_args)
|
||||
.map(|arg| arg.clone())
|
||||
.collect::<HashSet<String>>();
|
||||
|
||||
Ok(env_var_args
|
||||
.into_iter()
|
||||
.filter(|env_var_arg| {
|
||||
let env_var_arg_key = env_var_arg.split("=").next().unwrap_or("");
|
||||
matched_env_args_keys.contains(env_var_arg_key)
|
||||
})
|
||||
.collect::<Vec<String>>())
|
||||
}
|
||||
|
||||
@@ -99,19 +113,23 @@ mod tests {
|
||||
let res = extract_dockerfile_args(dockerfile.to_vec());
|
||||
assert_eq!(res.unwrap().len(), 4);
|
||||
|
||||
let matched_vars = match_used_env_var_args(
|
||||
vec![
|
||||
"foo".to_string(),
|
||||
"bar".to_string(),
|
||||
"toto".to_string(),
|
||||
"x".to_string(),
|
||||
],
|
||||
dockerfile.to_vec(),
|
||||
);
|
||||
let env_var_args_to_match = vec![
|
||||
"foo=abcdvalue".to_string(),
|
||||
"bar=abcdvalue".to_string(),
|
||||
"toto=abcdvalue".to_string(),
|
||||
"x=abcdvalue".to_string(),
|
||||
];
|
||||
|
||||
let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec());
|
||||
|
||||
assert_eq!(matched_vars.clone().unwrap(), env_var_args_to_match.clone());
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 4);
|
||||
|
||||
let matched_vars = match_used_env_var_args(vec!["toto".to_string(), "x".to_string()], dockerfile.to_vec());
|
||||
let matched_vars = match_used_env_var_args(
|
||||
vec!["toto=abcdvalue".to_string(), "x=abcdvalue".to_string()],
|
||||
dockerfile.to_vec(),
|
||||
);
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 2);
|
||||
|
||||
@@ -126,16 +144,60 @@ mod tests {
|
||||
RUN ls -lh
|
||||
";
|
||||
|
||||
let matched_vars = match_used_env_var_args(env_var_args_to_match.clone(), dockerfile.to_vec());
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_match_used_env_var_args_2() {
|
||||
let dockerfile = b"
|
||||
# This file is a template, and might need editing before it works on your project.
|
||||
FROM node:16-alpine as build
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
|
||||
ARG PRISMIC_REPO_NAME
|
||||
ENV PRISMIC_REPO_NAME $PRISMIC_REPO_NAME
|
||||
|
||||
ARG PRISMIC_API_KEY
|
||||
ENV PRISMIC_API_KEY $PRISMIC_API_KEY
|
||||
|
||||
ARG PRISMIC_CUSTOM_TYPES_API_TOKEN
|
||||
ENV PRISMIC_CUSTOM_TYPES_API_TOKEN $PRISMIC_CUSTOM_TYPES_API_TOKEN
|
||||
|
||||
RUN npm install && npm run build
|
||||
|
||||
FROM nginx:latest
|
||||
COPY --from=build /app/public /usr/share/nginx/html
|
||||
COPY ./nginx-custom.conf /etc/nginx/conf.d/default.conf
|
||||
|
||||
EXPOSE 80
|
||||
CMD [\"nginx\", \"-g\", \"daemon off;\"]
|
||||
";
|
||||
|
||||
let res = extract_dockerfile_args(dockerfile.to_vec());
|
||||
assert_eq!(res.unwrap().len(), 3);
|
||||
|
||||
let matched_vars = match_used_env_var_args(
|
||||
vec![
|
||||
"foo".to_string(),
|
||||
"bar".to_string(),
|
||||
"toto".to_string(),
|
||||
"x".to_string(),
|
||||
"PRISMIC_REPO_NAME=abcdvalue".to_string(),
|
||||
"PRISMIC_API_KEY=abcdvalue".to_string(),
|
||||
"PRISMIC_CUSTOM_TYPES_API_TOKEN=abcdvalue".to_string(),
|
||||
],
|
||||
dockerfile.to_vec(),
|
||||
);
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 3);
|
||||
|
||||
let matched_vars =
|
||||
match_used_env_var_args(vec!["PRISMIC_REPO_NAME=abcdvalue".to_string()], dockerfile.to_vec());
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 1);
|
||||
|
||||
let matched_vars = match_used_env_var_args(vec![], dockerfile.to_vec());
|
||||
|
||||
assert_eq!(matched_vars.unwrap().len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ use chrono::Duration;
|
||||
use git2::{Cred, CredentialType};
|
||||
use sysinfo::{Disk, DiskExt, SystemExt};
|
||||
|
||||
use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, Credentials, Image, Kind};
|
||||
use crate::build_platform::{docker, Build, BuildPlatform, BuildResult, CacheResult, Credentials, Image, Kind};
|
||||
use crate::cmd::utilities::QoveryCommand;
|
||||
use crate::error::{EngineError, EngineErrorCause, SimpleError, SimpleErrorKind};
|
||||
use crate::fs::workspace_directory;
|
||||
@@ -48,6 +48,7 @@ impl LocalDocker {
|
||||
&vec!["image", "inspect", image.name_with_tag().as_str()],
|
||||
&self.get_docker_host_envs(),
|
||||
);
|
||||
|
||||
Ok(matches!(cmd.exec(), Ok(_)))
|
||||
}
|
||||
|
||||
@@ -259,7 +260,7 @@ impl LocalDocker {
|
||||
));
|
||||
},
|
||||
)
|
||||
.map_err(|err| SimpleError::new(SimpleErrorKind::Other, Some(format!("{}", err))));
|
||||
.map_err(|err| SimpleError::new(SimpleErrorKind::Other, Some(format!("{:?}", err))));
|
||||
|
||||
if exit_status.is_ok() {
|
||||
// quit now if the builder successfully build the app
|
||||
@@ -286,6 +287,15 @@ impl LocalDocker {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_repository_build_root_path(&self, build: &Build) -> Result<String, EngineError> {
|
||||
workspace_directory(
|
||||
self.context.workspace_root_dir(),
|
||||
self.context.execution_id(),
|
||||
format!("build/{}", build.image.name.as_str()),
|
||||
)
|
||||
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl BuildPlatform for LocalDocker {
|
||||
@@ -317,6 +327,34 @@ impl BuildPlatform for LocalDocker {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn has_cache(&self, build: &Build) -> Result<CacheResult, EngineError> {
|
||||
info!("LocalDocker.has_cache() called for {}", self.name());
|
||||
|
||||
// Check if a local cache layers for the container image exists.
|
||||
let repository_root_path = self.get_repository_build_root_path(&build)?;
|
||||
|
||||
let parent_build = build
|
||||
.to_previous_build(repository_root_path)
|
||||
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?;
|
||||
|
||||
let parent_build = match parent_build {
|
||||
Some(parent_build) => parent_build,
|
||||
None => return Ok(CacheResult::MissWithoutParentBuild),
|
||||
};
|
||||
|
||||
// check if local layers exist
|
||||
let mut cmd = QoveryCommand::new("docker", &["images", "-q", parent_build.image.name.as_str()], &[]);
|
||||
|
||||
let mut result = CacheResult::Miss(parent_build);
|
||||
let _ = cmd.exec_with_timeout(
|
||||
Duration::minutes(1), // `docker images` command can be slow with tons of images - it's probably not indexed
|
||||
|_| result = CacheResult::Hit, // if a line is returned, then the image is locally present
|
||||
|r_err| error!("Error executing docker command {}", r_err),
|
||||
);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn build(&self, build: Build, force_build: bool) -> Result<BuildResult, EngineError> {
|
||||
info!("LocalDocker.build() called for {}", self.name());
|
||||
|
||||
@@ -331,19 +369,13 @@ impl BuildPlatform for LocalDocker {
|
||||
return Ok(BuildResult { build });
|
||||
}
|
||||
|
||||
// git clone
|
||||
let repository_root_path = workspace_directory(
|
||||
self.context.workspace_root_dir(),
|
||||
self.context.execution_id(),
|
||||
format!("build/{}", build.image.name.as_str()),
|
||||
)
|
||||
.map_err(|err| self.engine_error(EngineErrorCause::Internal, err.to_string()))?;
|
||||
let repository_root_path = self.get_repository_build_root_path(&build)?;
|
||||
|
||||
// Clone git repository
|
||||
info!(
|
||||
"cloning repository: {} to {}",
|
||||
build.git_repository.url, repository_root_path
|
||||
);
|
||||
|
||||
let get_credentials = |user: &str| {
|
||||
let mut creds: Vec<(CredentialType, Cred)> = Vec::with_capacity(build.git_repository.ssh_keys.len() + 1);
|
||||
for ssh_key in build.git_repository.ssh_keys.iter() {
|
||||
@@ -360,9 +392,17 @@ impl BuildPlatform for LocalDocker {
|
||||
Cred::userpass_plaintext(&login, &password).unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
creds
|
||||
};
|
||||
|
||||
if Path::new(repository_root_path.as_str()).exists() {
|
||||
// remove folder before cloning it again
|
||||
// FIXME: reuse this folder and checkout the right commit
|
||||
let _ = fs::remove_dir_all(repository_root_path.as_str());
|
||||
}
|
||||
|
||||
// git clone
|
||||
if let Err(clone_error) = git::clone_at_commit(
|
||||
&build.git_repository.url,
|
||||
&build.git_repository.commit_id,
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope};
|
||||
use crate::git;
|
||||
use crate::models::{Context, Listen};
|
||||
use crate::utilities::get_image_tag;
|
||||
use git2::{Cred, CredentialType, Error};
|
||||
use std::fmt::{Display, Formatter, Result as FmtResult};
|
||||
use std::path::Path;
|
||||
|
||||
pub mod docker;
|
||||
pub mod local_docker;
|
||||
@@ -16,6 +21,7 @@ pub trait BuildPlatform: Listen {
|
||||
format!("{} ({})", self.name(), self.id())
|
||||
}
|
||||
fn is_valid(&self) -> Result<(), EngineError>;
|
||||
fn has_cache(&self, build: &Build) -> Result<CacheResult, EngineError>;
|
||||
fn build(&self, build: Build, force_build: bool) -> Result<BuildResult, EngineError>;
|
||||
fn build_error(&self, build: Build) -> Result<BuildResult, EngineError>;
|
||||
fn engine_error_scope(&self) -> EngineErrorScope {
|
||||
@@ -37,20 +43,79 @@ pub struct Build {
|
||||
pub options: BuildOptions,
|
||||
}
|
||||
|
||||
impl Build {
|
||||
pub fn to_previous_build<P>(&self, clone_repo_into_dir: P) -> Result<Option<Build>, Error>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let parent_commit_id = git::get_parent_commit_id(
|
||||
self.git_repository.url.as_str(),
|
||||
self.git_repository.commit_id.as_str(),
|
||||
clone_repo_into_dir,
|
||||
&|_| match &self.git_repository.credentials {
|
||||
None => vec![],
|
||||
Some(creds) => vec![(
|
||||
CredentialType::USER_PASS_PLAINTEXT,
|
||||
Cred::userpass_plaintext(creds.login.as_str(), creds.password.as_str()).unwrap(),
|
||||
)],
|
||||
},
|
||||
)?;
|
||||
|
||||
let parent_commit_id = match parent_commit_id {
|
||||
None => return Ok(None),
|
||||
Some(parent_commit_id) => parent_commit_id,
|
||||
};
|
||||
|
||||
let mut environment_variables_map = BTreeMap::<String, String>::new();
|
||||
for env in &self.options.environment_variables {
|
||||
environment_variables_map.insert(env.key.clone(), env.value.clone());
|
||||
}
|
||||
|
||||
let mut image = self.image.clone();
|
||||
image.tag = get_image_tag(
|
||||
&self.git_repository.root_path,
|
||||
&self.git_repository.dockerfile_path,
|
||||
&environment_variables_map,
|
||||
&parent_commit_id,
|
||||
);
|
||||
|
||||
image.commit_id = parent_commit_id.clone();
|
||||
|
||||
Ok(Some(Build {
|
||||
git_repository: GitRepository {
|
||||
url: self.git_repository.url.clone(),
|
||||
credentials: self.git_repository.credentials.clone(),
|
||||
ssh_keys: self.git_repository.ssh_keys.clone(),
|
||||
commit_id: parent_commit_id,
|
||||
dockerfile_path: self.git_repository.dockerfile_path.clone(),
|
||||
root_path: self.git_repository.root_path.clone(),
|
||||
buildpack_language: self.git_repository.buildpack_language.clone(),
|
||||
},
|
||||
image,
|
||||
options: BuildOptions {
|
||||
environment_variables: self.options.environment_variables.clone(),
|
||||
},
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BuildOptions {
|
||||
pub environment_variables: Vec<EnvironmentVariable>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
|
||||
pub struct EnvironmentVariable {
|
||||
pub key: String,
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
|
||||
pub struct Credentials {
|
||||
pub login: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
|
||||
pub struct SshKey {
|
||||
pub private_key: String,
|
||||
pub passphrase: Option<String>,
|
||||
@@ -129,3 +194,11 @@ impl BuildResult {
|
||||
pub enum Kind {
|
||||
LocalDocker,
|
||||
}
|
||||
|
||||
type ParentBuild = Build;
|
||||
|
||||
pub enum CacheResult {
|
||||
MissWithoutParentBuild,
|
||||
Miss(ParentBuild),
|
||||
Hit,
|
||||
}
|
||||
|
||||
@@ -191,6 +191,7 @@ impl Service for MongoDB {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_db_name", self.name.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
|
||||
@@ -212,6 +212,7 @@ impl Service for MySQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -197,6 +197,7 @@ impl Service for PostgreSQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_name", self.sanitized_name().as_str());
|
||||
context.insert("database_db_name", self.name());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
|
||||
@@ -213,6 +213,7 @@ impl Service for Redis {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -574,6 +574,7 @@ pub fn aws_helm_charts(
|
||||
chart_info: ChartInfo {
|
||||
name: "metrics-server".to_string(),
|
||||
path: chart_path("common/charts/metrics-server"),
|
||||
values_files: vec![chart_path("chart_values/metrics-server.yaml")],
|
||||
values: vec![
|
||||
ChartSetValue {
|
||||
key: "resources.limits.cpu".to_string(),
|
||||
|
||||
@@ -1404,7 +1404,7 @@ impl<'a> Kubernetes for EKS<'a> {
|
||||
if let Err(e) = self.delete_crashlooping_pods(
|
||||
None,
|
||||
None,
|
||||
Some(10),
|
||||
Some(3),
|
||||
self.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
error!(
|
||||
|
||||
@@ -382,7 +382,6 @@ impl Create for Router {
|
||||
}
|
||||
|
||||
impl Pause for Router {
|
||||
#[named]
|
||||
fn on_pause(&self, _target: &DeploymentTarget) -> Result<(), EngineError> {
|
||||
info!("AWS.router.on_pause() called for {}, doing nothing", self.name());
|
||||
Ok(())
|
||||
|
||||
@@ -188,6 +188,7 @@ impl Service for MongoDB {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_db_name", self.name.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
|
||||
@@ -188,6 +188,7 @@ impl Service for MySQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -188,6 +188,7 @@ impl Service for PostgreSQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_db_name", self.name());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
|
||||
@@ -188,6 +188,7 @@ impl Service for Redis {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -428,15 +428,8 @@ pub fn do_helm_charts(
|
||||
chart_info: ChartInfo {
|
||||
name: "metrics-server".to_string(),
|
||||
path: chart_path("common/charts/metrics-server"),
|
||||
values_files: vec![chart_path("chart_values/metrics-server.yaml")],
|
||||
values: vec![
|
||||
ChartSetValue {
|
||||
key: "extraArgs.kubelet-preferred-address-types".to_string(),
|
||||
value: "InternalIP".to_string(),
|
||||
},
|
||||
ChartSetValue {
|
||||
key: "apiService.create".to_string(),
|
||||
value: "true".to_string(),
|
||||
},
|
||||
ChartSetValue {
|
||||
key: "resources.limits.cpu".to_string(),
|
||||
value: "250m".to_string(),
|
||||
|
||||
@@ -612,6 +612,27 @@ impl<'a> DOKS<'a> {
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
match self.check_workers_on_create() {
|
||||
Ok(_) => {
|
||||
let message = format!("Kubernetes {} nodes have been successfully created", self.name());
|
||||
info!("{}", &message);
|
||||
self.send_to_customer(&message, &listeners_helper);
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Error while deploying cluster {} with Terraform with id {}.",
|
||||
self.name(),
|
||||
self.id()
|
||||
);
|
||||
return Err(EngineError {
|
||||
cause: EngineErrorCause::Internal,
|
||||
scope: EngineErrorScope::Engine,
|
||||
execution_id: self.context.execution_id().to_string(),
|
||||
message: e.message,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// kubernetes helm deployments on the cluster
|
||||
let kubeconfig_path = match self.get_kubeconfig_file_path() {
|
||||
Ok(path) => path,
|
||||
@@ -1229,7 +1250,7 @@ impl<'a> Kubernetes for DOKS<'a> {
|
||||
match self.delete_crashlooping_pods(
|
||||
None,
|
||||
None,
|
||||
Some(10),
|
||||
Some(3),
|
||||
self.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
Ok(..) => {}
|
||||
|
||||
@@ -130,7 +130,7 @@ impl Default for ChartInfo {
|
||||
atomic: true,
|
||||
force_upgrade: false,
|
||||
last_breaking_version_requiring_restart: None,
|
||||
timeout_in_seconds: 180,
|
||||
timeout_in_seconds: 300,
|
||||
dry_run: false,
|
||||
wait: true,
|
||||
values: Vec::new(),
|
||||
|
||||
@@ -24,6 +24,7 @@ use crate::cmd::kubectl::{
|
||||
kubectl_delete_objects_in_all_namespaces, kubectl_exec_count_all_objects, kubectl_exec_delete_pod,
|
||||
kubectl_exec_get_node, kubectl_exec_version, kubectl_get_crash_looping_pods, kubernetes_get_all_pdbs,
|
||||
};
|
||||
use crate::cmd::structs::KubernetesNodeCondition;
|
||||
use crate::dns_provider::DnsProvider;
|
||||
use crate::error::SimpleErrorKind::Other;
|
||||
use crate::error::{EngineError, EngineErrorCause, EngineErrorScope, SimpleError, SimpleErrorKind};
|
||||
@@ -226,6 +227,18 @@ pub trait Kubernetes: Listen {
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn check_workers_on_create(&self) -> Result<(), SimpleError>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
send_progress_on_long_task(self, Action::Create, || {
|
||||
check_workers_status(
|
||||
self.get_kubeconfig_file_path().expect("Unable to get Kubeconfig"),
|
||||
self.cloud_provider().credentials_environment_variables(),
|
||||
)
|
||||
})
|
||||
}
|
||||
fn upgrade_with_status(&self, kubernetes_upgrade_status: KubernetesUpgradeStatus) -> Result<(), EngineError>;
|
||||
fn on_upgrade(&self) -> Result<(), EngineError>;
|
||||
fn on_upgrade_error(&self) -> Result<(), EngineError>;
|
||||
@@ -850,7 +863,7 @@ pub fn is_kubernetes_upgradable<P>(kubernetes_config: P, envs: Vec<(&str, &str)>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
match kubernetes_get_all_pdbs(kubernetes_config, envs) {
|
||||
match kubernetes_get_all_pdbs(kubernetes_config, envs, None) {
|
||||
Ok(pdbs) => match pdbs.items.is_some() {
|
||||
false => Ok(()),
|
||||
true => {
|
||||
@@ -908,6 +921,43 @@ where
|
||||
}
|
||||
});
|
||||
|
||||
return match result {
|
||||
Ok(_) => match check_workers_status(kubernetes_config.as_ref(), envs.clone()) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
Err(Operation { error, .. }) => Err(error),
|
||||
Err(retry::Error::Internal(e)) => Err(SimpleError::new(SimpleErrorKind::Other, Some(e))),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn check_workers_status<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<(), SimpleError>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let result = retry::retry(Fixed::from_millis(10000).take(60), || {
|
||||
match kubectl_exec_get_node(kubernetes_config.as_ref(), envs.clone()) {
|
||||
Err(e) => OperationResult::Retry(e),
|
||||
Ok(nodes) => {
|
||||
let mut conditions: Vec<KubernetesNodeCondition> = Vec::new();
|
||||
for node in nodes.items.into_iter() {
|
||||
conditions.extend(node.status.conditions.into_iter());
|
||||
}
|
||||
|
||||
for condition in conditions.iter() {
|
||||
if condition.condition_type == "Ready" && condition.status != "True" {
|
||||
info!("All worker nodes are not ready yet, waiting...");
|
||||
return OperationResult::Retry(SimpleError::new(
|
||||
SimpleErrorKind::Other,
|
||||
Some("There are still not ready worker nodes."),
|
||||
));
|
||||
}
|
||||
}
|
||||
return OperationResult::Ok(());
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return match result {
|
||||
Ok(_) => Ok(()),
|
||||
Err(Operation { error, .. }) => Err(error),
|
||||
|
||||
@@ -482,6 +482,7 @@ impl FromStr for ScwRegion {
|
||||
pub enum ScwZone {
|
||||
Paris1,
|
||||
Paris2,
|
||||
Paris3,
|
||||
Amsterdam1,
|
||||
Warsaw1,
|
||||
}
|
||||
@@ -492,6 +493,7 @@ impl ScwZone {
|
||||
match self {
|
||||
ScwZone::Paris1 => "fr-par-1",
|
||||
ScwZone::Paris2 => "fr-par-2",
|
||||
ScwZone::Paris3 => "fr-par-3",
|
||||
ScwZone::Amsterdam1 => "nl-ams-1",
|
||||
ScwZone::Warsaw1 => "pl-waw-1",
|
||||
}
|
||||
@@ -501,6 +503,7 @@ impl ScwZone {
|
||||
match self {
|
||||
ScwZone::Paris1 => ScwRegion::Paris,
|
||||
ScwZone::Paris2 => ScwRegion::Paris,
|
||||
ScwZone::Paris3 => ScwRegion::Paris,
|
||||
ScwZone::Amsterdam1 => ScwRegion::Amsterdam,
|
||||
ScwZone::Warsaw1 => ScwRegion::Warsaw,
|
||||
}
|
||||
@@ -511,6 +514,7 @@ impl ScwZone {
|
||||
match self {
|
||||
ScwZone::Paris1 => "fr-par",
|
||||
ScwZone::Paris2 => "fr-par",
|
||||
ScwZone::Paris3 => "fr-par",
|
||||
ScwZone::Amsterdam1 => "nl-ams",
|
||||
ScwZone::Warsaw1 => "pl-waw",
|
||||
}
|
||||
@@ -523,6 +527,7 @@ impl fmt::Display for ScwZone {
|
||||
match self {
|
||||
ScwZone::Paris1 => write!(f, "fr-par-1"),
|
||||
ScwZone::Paris2 => write!(f, "fr-par-2"),
|
||||
ScwZone::Paris3 => write!(f, "fr-par-3"),
|
||||
ScwZone::Amsterdam1 => write!(f, "nl-ams-1"),
|
||||
ScwZone::Warsaw1 => write!(f, "pl-waw-1"),
|
||||
}
|
||||
@@ -536,6 +541,7 @@ impl FromStr for ScwZone {
|
||||
match s {
|
||||
"fr-par-1" => Ok(ScwZone::Paris1),
|
||||
"fr-par-2" => Ok(ScwZone::Paris2),
|
||||
"fr-par-3" => Ok(ScwZone::Paris3),
|
||||
"nl-ams-1" => Ok(ScwZone::Amsterdam1),
|
||||
"pl-waw-1" => Ok(ScwZone::Warsaw1),
|
||||
_ => Err(()),
|
||||
@@ -566,6 +572,7 @@ mod tests {
|
||||
fn test_zone_to_str() {
|
||||
assert_eq!("fr-par-1", ScwZone::Paris1.as_str());
|
||||
assert_eq!("fr-par-2", ScwZone::Paris2.as_str());
|
||||
assert_eq!("fr-par-3", ScwZone::Paris3.as_str());
|
||||
assert_eq!("nl-ams-1", ScwZone::Amsterdam1.as_str());
|
||||
assert_eq!("pl-waw-1", ScwZone::Warsaw1.as_str());
|
||||
}
|
||||
@@ -574,6 +581,7 @@ mod tests {
|
||||
fn test_zone_from_str() {
|
||||
assert_eq!(ScwZone::from_str("fr-par-1"), Ok(ScwZone::Paris1));
|
||||
assert_eq!(ScwZone::from_str("fr-par-2"), Ok(ScwZone::Paris2));
|
||||
assert_eq!(ScwZone::from_str("fr-par-3"), Ok(ScwZone::Paris3));
|
||||
assert_eq!(ScwZone::from_str("nl-ams-1"), Ok(ScwZone::Amsterdam1));
|
||||
assert_eq!(ScwZone::from_str("pl-waw-1"), Ok(ScwZone::Warsaw1));
|
||||
}
|
||||
@@ -582,6 +590,7 @@ mod tests {
|
||||
fn test_zone_region() {
|
||||
assert_eq!(ScwZone::Paris1.region(), ScwRegion::Paris);
|
||||
assert_eq!(ScwZone::Paris2.region(), ScwRegion::Paris);
|
||||
assert_eq!(ScwZone::Paris3.region(), ScwRegion::Paris);
|
||||
assert_eq!(ScwZone::Amsterdam1.region(), ScwRegion::Amsterdam);
|
||||
assert_eq!(ScwZone::Warsaw1.region(), ScwRegion::Warsaw);
|
||||
}
|
||||
|
||||
@@ -189,6 +189,7 @@ impl Service for MongoDB {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_db_name", self.name.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
|
||||
@@ -221,6 +221,7 @@ impl Service for MySQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -230,6 +230,7 @@ impl Service for PostgreSQL {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_name", self.sanitized_name().as_str());
|
||||
context.insert("database_db_name", self.name());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
|
||||
@@ -189,6 +189,7 @@ impl Service for Redis {
|
||||
"fqdn",
|
||||
self.fqdn(target, &self.fqdn, self.is_managed_service()).as_str(),
|
||||
);
|
||||
context.insert("service_name", self.fqdn_id.as_str());
|
||||
context.insert("database_login", self.options.login.as_str());
|
||||
context.insert("database_password", self.options.password.as_str());
|
||||
context.insert("database_port", &self.private_port());
|
||||
|
||||
@@ -513,6 +513,27 @@ impl<'a> Kapsule<'a> {
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
match self.check_workers_on_create() {
|
||||
Ok(_) => {
|
||||
let message = format!("Kubernetes {} nodes have been successfully created", self.name());
|
||||
info!("{}", &message);
|
||||
self.send_to_customer(&message, &listeners_helper);
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Error while deploying cluster {} with Terraform with id {}.",
|
||||
self.name(),
|
||||
self.id()
|
||||
);
|
||||
return Err(LegacyEngineError {
|
||||
cause: EngineErrorCause::Internal,
|
||||
scope: EngineErrorScope::Engine,
|
||||
execution_id: self.context.execution_id().to_string(),
|
||||
message: e.message,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// kubernetes helm deployments on the cluster
|
||||
let kubeconfig = PathBuf::from(self.get_kubeconfig_file().expect("expected to get a kubeconfig file").0);
|
||||
let credentials_environment_variables: Vec<(String, String)> = self
|
||||
@@ -1183,7 +1204,7 @@ impl<'a> Kubernetes for Kapsule<'a> {
|
||||
match self.delete_crashlooping_pods(
|
||||
None,
|
||||
None,
|
||||
Some(10),
|
||||
Some(3),
|
||||
self.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
Ok(..) => {}
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::cloud_provider::metrics::KubernetesApiMetrics;
|
||||
use crate::cmd::structs::{
|
||||
Configmap, Daemonset, Item, KubernetesEvent, KubernetesJob, KubernetesKind, KubernetesList, KubernetesNode,
|
||||
KubernetesPod, KubernetesPodStatusPhase, KubernetesPodStatusReason, KubernetesService, KubernetesVersion,
|
||||
LabelsContent, Namespace, Secrets, PDB, PVC, SVC,
|
||||
LabelsContent, Namespace, Secrets, HPA, PDB, PVC, SVC,
|
||||
};
|
||||
use crate::cmd::utilities::QoveryCommand;
|
||||
use crate::constants::KUBECONFIG;
|
||||
@@ -1229,13 +1229,44 @@ where
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn kubernetes_get_all_pdbs<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<PDB, SimpleError>
|
||||
pub fn kubernetes_get_all_pdbs<P>(
|
||||
kubernetes_config: P,
|
||||
envs: Vec<(&str, &str)>,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<PDB, SimpleError>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
kubectl_exec::<P, PDB>(
|
||||
vec!["get", "pdb", "--all-namespaces", "-o", "json"],
|
||||
kubernetes_config,
|
||||
envs,
|
||||
)
|
||||
let mut cmd_args = vec!["get", "pdb", "-o", "json"];
|
||||
|
||||
match namespace {
|
||||
Some(n) => {
|
||||
cmd_args.push("-n");
|
||||
cmd_args.push(n);
|
||||
}
|
||||
None => cmd_args.push("--all-namespaces"),
|
||||
}
|
||||
|
||||
kubectl_exec::<P, PDB>(cmd_args, kubernetes_config, envs)
|
||||
}
|
||||
|
||||
pub fn kubernetes_get_all_hpas<P>(
|
||||
kubernetes_config: P,
|
||||
envs: Vec<(&str, &str)>,
|
||||
namespace: Option<&str>,
|
||||
) -> Result<HPA, SimpleError>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let mut cmd_args = vec!["get", "hpa", "-o", "json"];
|
||||
|
||||
match namespace {
|
||||
Some(n) => {
|
||||
cmd_args.push("-n");
|
||||
cmd_args.push(n);
|
||||
}
|
||||
None => cmd_args.push("--all-namespaces"),
|
||||
}
|
||||
|
||||
kubectl_exec::<P, HPA>(cmd_args, kubernetes_config, envs)
|
||||
}
|
||||
|
||||
@@ -286,6 +286,7 @@ pub struct KubernetesNodeStatus {
|
||||
pub allocatable: KubernetesNodeStatusResources,
|
||||
pub capacity: KubernetesNodeStatusResources,
|
||||
pub node_info: KubernetesNodeInfo,
|
||||
pub conditions: Vec<KubernetesNodeCondition>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone, Eq, PartialEq)]
|
||||
@@ -303,6 +304,14 @@ pub struct KubernetesNodeInfo {
|
||||
pub kubelet_version: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone, Eq, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KubernetesNodeCondition {
|
||||
#[serde(rename = "type")]
|
||||
pub condition_type: String,
|
||||
pub status: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone, Eq, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KubernetesEvent {
|
||||
@@ -495,6 +504,35 @@ pub struct PDBStatus {
|
||||
pub observed_generation: i16,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HPA {
|
||||
pub api_version: String,
|
||||
pub items: Option<Vec<HPAItem>>,
|
||||
pub kind: String,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HPAItem {
|
||||
pub api_version: String,
|
||||
pub kind: String,
|
||||
pub metadata: HPAMetadata,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HPAMetadata {
|
||||
pub annotations: Option<HPAAnnotationCondition>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HPAAnnotationCondition {
|
||||
#[serde(rename = "autoscaling.alpha.kubernetes.io/conditions")]
|
||||
pub conditions: Option<String>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::cmd::structs::{KubernetesList, KubernetesPod, KubernetesPodStatusReason, PDB, PVC, SVC};
|
||||
|
||||
@@ -191,3 +191,55 @@ pub fn docker_tag_and_push_image(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn docker_pull_image(
|
||||
container_registry_kind: Kind,
|
||||
docker_envs: Vec<(&str, &str)>,
|
||||
dest: String,
|
||||
) -> Result<(), SimpleError> {
|
||||
let registry_provider = match container_registry_kind {
|
||||
Kind::DockerHub => "DockerHub",
|
||||
Kind::Ecr => "AWS ECR",
|
||||
Kind::Docr => "DigitalOcean Registry",
|
||||
Kind::ScalewayCr => "Scaleway Registry",
|
||||
};
|
||||
|
||||
let mut cmd = QoveryCommand::new("docker", &vec!["pull", dest.as_str()], &docker_envs);
|
||||
match retry::retry(Fibonacci::from_millis(5000).take(5), || {
|
||||
match cmd.exec_with_timeout(
|
||||
Duration::minutes(10),
|
||||
|line| info!("{}", line),
|
||||
|line| error!("{}", line),
|
||||
) {
|
||||
Ok(_) => OperationResult::Ok(()),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"failed to pull image from {} registry {}, {:?} retrying...",
|
||||
registry_provider,
|
||||
dest.as_str(),
|
||||
e,
|
||||
);
|
||||
OperationResult::Retry(e)
|
||||
}
|
||||
}
|
||||
}) {
|
||||
Err(Operation { error, .. }) => Err(SimpleError::new(SimpleErrorKind::Other, Some(error.to_string()))),
|
||||
Err(e) => Err(SimpleError::new(
|
||||
SimpleErrorKind::Other,
|
||||
Some(format!(
|
||||
"unknown error while trying to pull image {} from {} registry. {:?}",
|
||||
dest.as_str(),
|
||||
registry_provider,
|
||||
e,
|
||||
)),
|
||||
)),
|
||||
_ => {
|
||||
info!(
|
||||
"image {} has successfully been pulled from {} registry",
|
||||
dest.as_str(),
|
||||
registry_provider,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ use reqwest::StatusCode;
|
||||
|
||||
use crate::build_platform::Image;
|
||||
use crate::cmd::utilities::QoveryCommand;
|
||||
use crate::container_registry::docker::docker_tag_and_push_image;
|
||||
use crate::container_registry::{ContainerRegistry, EngineError, Kind, PushResult};
|
||||
use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image};
|
||||
use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult};
|
||||
use crate::error::EngineErrorCause;
|
||||
use crate::models::{
|
||||
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
|
||||
@@ -31,6 +31,45 @@ impl DockerHub {
|
||||
listeners: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exec_docker_login(&self) -> Result<(), EngineError> {
|
||||
let envs = match self.context.docker_tcp_socket() {
|
||||
Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())],
|
||||
None => vec![],
|
||||
};
|
||||
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"docker",
|
||||
&vec!["login", "-u", self.login.as_str(), "-p", self.password.as_str()],
|
||||
&envs,
|
||||
);
|
||||
|
||||
match cmd.exec() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your DockerHub account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to DockerHub {}", self.name_with_id()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn pull_image(&self, dest: String, image: &Image) -> Result<PullResult, EngineError> {
|
||||
match docker_pull_image(self.kind(), vec![], dest.clone()) {
|
||||
Ok(_) => {
|
||||
let mut image = image.clone();
|
||||
image.registry_url = Some(dest);
|
||||
Ok(PullResult::Some(image))
|
||||
}
|
||||
Err(e) => Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
e.message
|
||||
.unwrap_or_else(|| "unknown error occurring during docker pull".to_string()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ContainerRegistry for DockerHub {
|
||||
@@ -100,26 +139,51 @@ impl ContainerRegistry for DockerHub {
|
||||
}
|
||||
}
|
||||
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let envs = match self.context.docker_tcp_socket() {
|
||||
Some(tcp_socket) => vec![("DOCKER_HOST", tcp_socket.as_str())],
|
||||
None => vec![],
|
||||
};
|
||||
fn pull(&self, image: &Image) -> Result<PullResult, EngineError> {
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"docker",
|
||||
&vec!["login", "-u", self.login.as_str(), "-p", self.password.as_str()],
|
||||
&envs,
|
||||
);
|
||||
if let Err(_) = cmd.exec() {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your DockerHub account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to DockerHub {}", self.name_with_id()),
|
||||
if !self.does_image_exists(image) {
|
||||
let info_message = format!(
|
||||
"image {:?} does not exist in DockerHub {} repository",
|
||||
image,
|
||||
self.name()
|
||||
);
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
};
|
||||
|
||||
return Ok(PullResult::None);
|
||||
}
|
||||
|
||||
let info_message = format!("pull image {:?} from DockerHub {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
let _ = self.exec_docker_login()?;
|
||||
|
||||
let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str());
|
||||
|
||||
// pull image
|
||||
self.pull_image(dest, image)
|
||||
}
|
||||
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let _ = self.exec_docker_login()?;
|
||||
|
||||
let dest = format!("{}/{}", self.login.as_str(), image.name_with_tag().as_str());
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
@@ -5,8 +5,8 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::build_platform::Image;
|
||||
use crate::cmd::utilities::QoveryCommand;
|
||||
use crate::container_registry::docker::docker_tag_and_push_image;
|
||||
use crate::container_registry::{ContainerRegistry, EngineError, Kind, PushResult};
|
||||
use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image};
|
||||
use crate::container_registry::{ContainerRegistry, EngineError, Kind, PullResult, PushResult};
|
||||
use crate::error::{cast_simple_error_to_engine_error, EngineErrorCause, SimpleError, SimpleErrorKind};
|
||||
use crate::models::{
|
||||
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
|
||||
@@ -120,7 +120,7 @@ impl DOCR {
|
||||
EngineErrorCause::Internal,
|
||||
e.message
|
||||
.unwrap_or_else(|| "unknown error occurring during docker push".to_string()),
|
||||
))
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -189,6 +189,43 @@ impl DOCR {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exec_docr_login(&self) -> Result<(), EngineError> {
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"doctl",
|
||||
&vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()],
|
||||
&vec![],
|
||||
);
|
||||
|
||||
match cmd.exec() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your DOCR account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to DOCR {}", self.name_with_id()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn pull_image(&self, registry_name: String, dest: String, image: &Image) -> Result<PullResult, EngineError> {
|
||||
match docker_pull_image(self.kind(), vec![], dest.clone()) {
|
||||
Ok(_) => {
|
||||
let mut image = image.clone();
|
||||
image.registry_name = Some(registry_name.clone());
|
||||
// on DOCR registry secret is the same as registry name
|
||||
image.registry_secret = Some(registry_name);
|
||||
image.registry_url = Some(dest);
|
||||
Ok(PullResult::Some(image))
|
||||
}
|
||||
Err(e) => Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
e.message
|
||||
.unwrap_or_else(|| "unknown error occurring during docker pull".to_string()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ContainerRegistry for DOCR {
|
||||
@@ -305,6 +342,51 @@ impl ContainerRegistry for DOCR {
|
||||
}
|
||||
}
|
||||
|
||||
fn pull(&self, image: &Image) -> Result<PullResult, EngineError> {
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
if !self.does_image_exists(image) {
|
||||
let info_message = format!("image {:?} does not exist in DOCR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
return Ok(PullResult::None);
|
||||
}
|
||||
|
||||
let info_message = format!("pull image {:?} from DOCR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
let _ = self.exec_docr_login()?;
|
||||
|
||||
let registry_name = self.get_registry_name(image)?;
|
||||
|
||||
let dest = format!(
|
||||
"registry.digitalocean.com/{}/{}",
|
||||
registry_name.as_str(),
|
||||
image.name_with_tag()
|
||||
);
|
||||
|
||||
// pull image
|
||||
self.pull_image(registry_name, dest, image)
|
||||
}
|
||||
|
||||
// https://www.digitalocean.com/docs/images/container-registry/how-to/use-registry-docker-kubernetes/
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let registry_name = self.get_registry_name(image)?;
|
||||
@@ -314,20 +396,7 @@ impl ContainerRegistry for DOCR {
|
||||
Err(_) => warn!("DOCR {} already exists", registry_name.as_str()),
|
||||
};
|
||||
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"doctl",
|
||||
&vec!["registry", "login", self.name.as_str(), "-t", self.api_key.as_str()],
|
||||
&vec![],
|
||||
);
|
||||
if let Err(_) = cmd.exec() {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your DOCR account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to DOCR {}", self.name_with_id()),
|
||||
));
|
||||
};
|
||||
let _ = self.exec_docr_login()?;
|
||||
|
||||
let dest = format!(
|
||||
"registry.digitalocean.com/{}/{}",
|
||||
|
||||
@@ -10,8 +10,8 @@ use rusoto_sts::{GetCallerIdentityRequest, Sts, StsClient};
|
||||
|
||||
use crate::build_platform::Image;
|
||||
use crate::cmd::utilities::QoveryCommand;
|
||||
use crate::container_registry::docker::docker_tag_and_push_image;
|
||||
use crate::container_registry::{ContainerRegistry, Kind, PushResult};
|
||||
use crate::container_registry::docker::{docker_pull_image, docker_tag_and_push_image};
|
||||
use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult};
|
||||
use crate::error::{EngineError, EngineErrorCause};
|
||||
use crate::models::{
|
||||
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
|
||||
@@ -136,6 +136,24 @@ impl ECR {
|
||||
}
|
||||
}
|
||||
|
||||
fn pull_image(&self, dest: String, image: &Image) -> Result<PullResult, EngineError> {
|
||||
// READ https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html
|
||||
// docker pull aws_account_id.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest
|
||||
|
||||
match docker_pull_image(self.kind(), self.docker_envs(), dest.clone()) {
|
||||
Ok(_) => {
|
||||
let mut image = image.clone();
|
||||
image.registry_url = Some(dest);
|
||||
Ok(PullResult::Some(image))
|
||||
}
|
||||
Err(e) => Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
e.message
|
||||
.unwrap_or_else(|| "unknown error occurring during docker pull".to_string()),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn create_repository(&self, image: &Image) -> Result<Repository, EngineError> {
|
||||
let repository_name = image.name.as_str();
|
||||
info!("creating ECR repository {}", &repository_name);
|
||||
@@ -263,6 +281,82 @@ impl ECR {
|
||||
|
||||
self.create_repository(image)
|
||||
}
|
||||
|
||||
fn get_credentials(&self) -> Result<ECRCredentials, EngineError> {
|
||||
let r = block_on(
|
||||
self.ecr_client()
|
||||
.get_authorization_token(GetAuthorizationTokenRequest::default()),
|
||||
);
|
||||
|
||||
let (access_token, password, endpoint_url) = match r {
|
||||
Ok(t) => match t.authorization_data {
|
||||
Some(authorization_data) => {
|
||||
let ad = authorization_data.first().unwrap();
|
||||
let b64_token = ad.authorization_token.as_ref().unwrap();
|
||||
|
||||
let decoded_token = base64::decode(b64_token).unwrap();
|
||||
let token = std::str::from_utf8(decoded_token.as_slice()).unwrap();
|
||||
|
||||
let s_token: Vec<&str> = token.split(':').collect::<Vec<_>>();
|
||||
|
||||
(
|
||||
s_token.first().unwrap().to_string(),
|
||||
s_token.get(1).unwrap().to_string(),
|
||||
ad.clone().proxy_endpoint.unwrap(),
|
||||
)
|
||||
}
|
||||
None => {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
format!(
|
||||
"failed to retrieve credentials and endpoint URL from ECR {}",
|
||||
self.name_with_id(),
|
||||
),
|
||||
));
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
format!(
|
||||
"failed to retrieve credentials and endpoint URL from ECR {}",
|
||||
self.name_with_id(),
|
||||
),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(ECRCredentials::new(access_token, password, endpoint_url))
|
||||
}
|
||||
|
||||
fn exec_docker_login(&self) -> Result<(), EngineError> {
|
||||
let credentials = self.get_credentials()?;
|
||||
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"docker",
|
||||
&vec![
|
||||
"login",
|
||||
"-u",
|
||||
credentials.access_token.as_str(),
|
||||
"-p",
|
||||
credentials.password.as_str(),
|
||||
credentials.endpoint_url.as_str(),
|
||||
],
|
||||
&self.docker_envs(),
|
||||
);
|
||||
|
||||
if let Err(_) = cmd.exec() {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your ECR account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to ECR {}", self.name_with_id()),
|
||||
));
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ContainerRegistry for ECR {
|
||||
@@ -319,50 +413,62 @@ impl ContainerRegistry for ECR {
|
||||
self.get_image(image).is_some()
|
||||
}
|
||||
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let r = block_on(
|
||||
self.ecr_client()
|
||||
.get_authorization_token(GetAuthorizationTokenRequest::default()),
|
||||
);
|
||||
fn pull(&self, image: &Image) -> Result<PullResult, EngineError> {
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
let (access_token, password, endpoint_url) = match r {
|
||||
Ok(t) => match t.authorization_data {
|
||||
Some(authorization_data) => {
|
||||
let ad = authorization_data.first().unwrap();
|
||||
let b64_token = ad.authorization_token.as_ref().unwrap();
|
||||
if !self.does_image_exists(image) {
|
||||
let info_message = format!("image {:?} does not exist in ECR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
let decoded_token = base64::decode(b64_token).unwrap();
|
||||
let token = std::str::from_utf8(decoded_token.as_slice()).unwrap();
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
let s_token: Vec<&str> = token.split(':').collect::<Vec<_>>();
|
||||
return Ok(PullResult::None);
|
||||
}
|
||||
|
||||
(
|
||||
s_token.first().unwrap().to_string(),
|
||||
s_token.get(1).unwrap().to_string(),
|
||||
ad.clone().proxy_endpoint.unwrap(),
|
||||
)
|
||||
}
|
||||
None => {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
format!(
|
||||
"failed to retrieve credentials and endpoint URL from ECR {}",
|
||||
self.name_with_id(),
|
||||
),
|
||||
));
|
||||
}
|
||||
let info_message = format!("pull image {:?} from ECR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
let _ = self.exec_docker_login()?;
|
||||
|
||||
let repository = match self.get_or_create_repository(image) {
|
||||
Ok(r) => r,
|
||||
_ => {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
format!(
|
||||
"failed to retrieve credentials and endpoint URL from ECR {}",
|
||||
"failed to create ECR repository for {} with image {:?}",
|
||||
self.name_with_id(),
|
||||
image,
|
||||
),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let dest = format!("{}:{}", repository.repository_uri.unwrap(), image.tag.as_str());
|
||||
|
||||
// pull image
|
||||
self.pull_image(dest, image)
|
||||
}
|
||||
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let _ = self.exec_docker_login()?;
|
||||
|
||||
let repository = match if force_push {
|
||||
self.create_repository(image)
|
||||
} else {
|
||||
@@ -381,29 +487,6 @@ impl ContainerRegistry for ECR {
|
||||
}
|
||||
};
|
||||
|
||||
let mut cmd = QoveryCommand::new(
|
||||
"docker",
|
||||
&vec![
|
||||
"login",
|
||||
"-u",
|
||||
access_token.as_str(),
|
||||
"-p",
|
||||
password.as_str(),
|
||||
endpoint_url.as_str(),
|
||||
],
|
||||
&self.docker_envs(),
|
||||
);
|
||||
|
||||
if let Err(_) = cmd.exec() {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your ECR account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to ECR {}", self.name_with_id()),
|
||||
));
|
||||
};
|
||||
|
||||
let dest = format!("{}:{}", repository.repository_uri.unwrap(), image.tag.as_str());
|
||||
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
@@ -468,3 +551,19 @@ impl Listen for ECR {
|
||||
self.listeners.push(listener);
|
||||
}
|
||||
}
|
||||
|
||||
/// Credential triple used to run `docker login` against an ECR registry
/// (the surrounding code passes these as `-u <access_token> -p <password> <endpoint_url>`).
struct ECRCredentials {
    // username part of the decoded ECR authorization token
    access_token: String,
    // password part of the decoded ECR authorization token
    password: String,
    // registry endpoint URL (ECR proxy endpoint)
    endpoint_url: String,
}

impl ECRCredentials {
    /// Bundles the three values needed to authenticate docker against ECR.
    fn new(access_token: String, password: String, endpoint_url: String) -> Self {
        Self {
            access_token,
            password,
            endpoint_url,
        }
    }
}
|
||||
|
||||
@@ -24,6 +24,7 @@ pub trait ContainerRegistry: Listen {
|
||||
fn on_delete(&self) -> Result<(), EngineError>;
|
||||
fn on_delete_error(&self) -> Result<(), EngineError>;
|
||||
fn does_image_exists(&self, image: &Image) -> bool;
|
||||
fn pull(&self, image: &Image) -> Result<PullResult, EngineError>;
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError>;
|
||||
fn push_error(&self, image: &Image) -> Result<PushResult, EngineError>;
|
||||
fn engine_error_scope(&self) -> EngineErrorScope {
|
||||
@@ -43,6 +44,11 @@ pub struct PushResult {
|
||||
pub image: Image,
|
||||
}
|
||||
|
||||
pub enum PullResult {
|
||||
Some(Image),
|
||||
None,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
|
||||
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
|
||||
pub enum Kind {
|
||||
|
||||
@@ -3,8 +3,10 @@ extern crate scaleway_api_rs;
|
||||
use crate::cloud_provider::scaleway::application::ScwZone;
|
||||
|
||||
use crate::build_platform::Image;
|
||||
use crate::container_registry::docker::{docker_login, docker_manifest_inspect, docker_tag_and_push_image};
|
||||
use crate::container_registry::{ContainerRegistry, Kind, PushResult};
|
||||
use crate::container_registry::docker::{
|
||||
docker_login, docker_manifest_inspect, docker_pull_image, docker_tag_and_push_image,
|
||||
};
|
||||
use crate::container_registry::{ContainerRegistry, Kind, PullResult, PushResult};
|
||||
use crate::error::{EngineError, EngineErrorCause};
|
||||
use crate::models::{
|
||||
Context, Listen, Listener, Listeners, ListenersHelper, ProgressInfo, ProgressLevel, ProgressScope,
|
||||
@@ -165,14 +167,14 @@ impl ScalewayCR {
|
||||
}
|
||||
}
|
||||
|
||||
fn push_image(&self, image_url: String, image: &Image) -> Result<PushResult, EngineError> {
|
||||
fn push_image(&self, dest: String, image: &Image) -> Result<PushResult, EngineError> {
|
||||
// https://www.scaleway.com/en/docs/deploy-an-image-from-registry-to-kubernetes-kapsule/
|
||||
match docker_tag_and_push_image(
|
||||
self.kind(),
|
||||
self.get_docker_envs(),
|
||||
image.name.clone(),
|
||||
image.tag.clone(),
|
||||
image_url,
|
||||
dest,
|
||||
) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
@@ -206,6 +208,21 @@ impl ScalewayCR {
|
||||
}
|
||||
}
|
||||
|
||||
fn pull_image(&self, dest: String, image: &Image) -> Result<PullResult, EngineError> {
|
||||
match docker_pull_image(self.kind(), self.get_docker_envs(), dest) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::Internal,
|
||||
e.message
|
||||
.unwrap_or_else(|| "unknown error occurring during docker pull".to_string()),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(PullResult::Some(image.clone()))
|
||||
}
|
||||
|
||||
pub fn create_registry_namespace(
|
||||
&self,
|
||||
image: &Image,
|
||||
@@ -295,6 +312,28 @@ impl ScalewayCR {
|
||||
.as_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
fn exec_docker_login(&self, registry_url: &String) -> Result<(), EngineError> {
|
||||
if docker_login(
|
||||
Kind::ScalewayCr,
|
||||
self.get_docker_envs(),
|
||||
self.login.clone(),
|
||||
self.secret_token.clone(),
|
||||
registry_url.clone(),
|
||||
)
|
||||
.is_err()
|
||||
{
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your Scaleway account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to Scaleway {}", self.name_with_id()),
|
||||
));
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ContainerRegistry for ScalewayCR {
|
||||
@@ -361,6 +400,70 @@ impl ContainerRegistry for ScalewayCR {
|
||||
.is_some()
|
||||
}
|
||||
|
||||
fn pull(&self, image: &Image) -> Result<PullResult, EngineError> {
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
let mut image = image.clone();
|
||||
let registry_url: String;
|
||||
|
||||
match self.get_or_create_registry_namespace(&image) {
|
||||
Ok(registry) => {
|
||||
info!(
|
||||
"Scaleway registry namespace for {} has been created",
|
||||
image.name.as_str()
|
||||
);
|
||||
image.registry_name = Some(image.name.clone()); // Note: Repository namespace should have the same name as the image name
|
||||
image.registry_url = registry.endpoint.clone();
|
||||
image.registry_secret = Some(self.secret_token.clone());
|
||||
image.registry_docker_json_config = Some(self.get_docker_json_config_raw());
|
||||
registry_url = registry.endpoint.unwrap_or_else(|| "undefined".to_string());
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Scaleway registry namespace for {} cannot be created, error: {:?}",
|
||||
image.name.as_str(),
|
||||
e
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
if !self.does_image_exists(&image) {
|
||||
let info_message = format!("image {:?} does not exist in SCR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
return Ok(PullResult::None);
|
||||
}
|
||||
|
||||
let info_message = format!("pull image {:?} from SCR {} repository", image, self.name());
|
||||
info!("{}", info_message.as_str());
|
||||
|
||||
listeners_helper.deployment_in_progress(ProgressInfo::new(
|
||||
ProgressScope::Application {
|
||||
id: image.application_id.clone(),
|
||||
},
|
||||
ProgressLevel::Info,
|
||||
Some(info_message),
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
let _ = self.exec_docker_login(®istry_url)?;
|
||||
|
||||
let dest = format!("{}/{}", registry_url, image.name_with_tag());
|
||||
|
||||
// pull image
|
||||
self.pull_image(dest, &image)
|
||||
}
|
||||
|
||||
fn push(&self, image: &Image, force_push: bool) -> Result<PushResult, EngineError> {
|
||||
let mut image = image.clone();
|
||||
let registry_url: String;
|
||||
@@ -389,25 +492,9 @@ impl ContainerRegistry for ScalewayCR {
|
||||
}
|
||||
}
|
||||
|
||||
if docker_login(
|
||||
Kind::ScalewayCr,
|
||||
self.get_docker_envs(),
|
||||
self.login.clone(),
|
||||
self.secret_token.clone(),
|
||||
registry_url.clone(),
|
||||
)
|
||||
.is_err()
|
||||
{
|
||||
return Err(self.engine_error(
|
||||
EngineErrorCause::User(
|
||||
"Your Scaleway account seems to be no longer valid (bad Credentials). \
|
||||
Please contact your Organization administrator to fix or change the Credentials.",
|
||||
),
|
||||
format!("failed to login to Scaleway {}", self.name_with_id()),
|
||||
));
|
||||
};
|
||||
let _ = self.exec_docker_login(®istry_url)?;
|
||||
|
||||
let image_url = format!("{}/{}", registry_url, image.name_with_tag());
|
||||
let dest = format!("{}/{}", registry_url, image.name_with_tag());
|
||||
|
||||
let listeners_helper = ListenersHelper::new(&self.listeners);
|
||||
|
||||
@@ -449,7 +536,7 @@ impl ContainerRegistry for ScalewayCR {
|
||||
self.context.execution_id(),
|
||||
));
|
||||
|
||||
self.push_image(image_url, &image)
|
||||
self.push_image(dest, &image)
|
||||
}
|
||||
|
||||
fn push_error(&self, image: &Image) -> Result<PushResult, EngineError> {
|
||||
|
||||
74
src/git.rs
74
src/git.rs
@@ -3,7 +3,7 @@ use std::path::Path;
|
||||
use git2::build::{CheckoutBuilder, RepoBuilder};
|
||||
use git2::ErrorCode::Auth;
|
||||
use git2::ResetType::Hard;
|
||||
use git2::{Cred, CredentialType, Error, RemoteCallbacks, Repository, SubmoduleUpdateOptions};
|
||||
use git2::{Cred, CredentialType, Error, Object, Oid, RemoteCallbacks, Repository, SubmoduleUpdateOptions};
|
||||
use url::Url;
|
||||
|
||||
// Credentials callback is called endlessly until the server return Auth Ok (or a definitive error)
|
||||
@@ -46,7 +46,7 @@ fn authentication_callback<'a>(
|
||||
};
|
||||
}
|
||||
|
||||
fn checkout(repo: &Repository, commit_id: &str) -> Result<(), Error> {
|
||||
fn checkout<'a>(repo: &'a Repository, commit_id: &'a str) -> Result<Object<'a>, Error> {
|
||||
let obj = repo.revparse_single(commit_id).map_err(|err| {
|
||||
let repo_url = repo
|
||||
.find_remote("origin")
|
||||
@@ -63,7 +63,8 @@ fn checkout(repo: &Repository, commit_id: &str) -> Result<(), Error> {
|
||||
let mut checkout_opts = CheckoutBuilder::new();
|
||||
checkout_opts.force().remove_ignored(true).remove_untracked(true);
|
||||
|
||||
repo.reset(&obj, Hard, Some(&mut checkout_opts))
|
||||
let _ = repo.reset(&obj, Hard, Some(&mut checkout_opts))?;
|
||||
Ok(obj)
|
||||
}
|
||||
|
||||
fn clone<P>(
|
||||
@@ -92,6 +93,11 @@ where
|
||||
// Get our repository
|
||||
let mut repo = RepoBuilder::new();
|
||||
repo.fetch_options(fo);
|
||||
|
||||
if into_dir.as_ref().exists() {
|
||||
let _ = std::fs::remove_dir_all(into_dir.as_ref());
|
||||
}
|
||||
|
||||
repo.clone(url.as_str(), into_dir.as_ref())
|
||||
}
|
||||
|
||||
@@ -108,7 +114,7 @@ where
|
||||
let repo = clone(repository_url, into_dir, get_credentials)?;
|
||||
|
||||
// position the repo at the correct commit
|
||||
checkout(&repo, commit_id)?;
|
||||
let _ = checkout(&repo, commit_id)?;
|
||||
|
||||
// check submodules if needed
|
||||
{
|
||||
@@ -133,9 +139,30 @@ where
|
||||
Ok(repo)
|
||||
}
|
||||
|
||||
pub fn get_parent_commit_id<P>(
|
||||
repository_url: &str,
|
||||
commit_id: &str,
|
||||
into_dir: P,
|
||||
get_credentials: &impl Fn(&str) -> Vec<(CredentialType, Cred)>,
|
||||
) -> Result<Option<String>, Error>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
// clone repository
|
||||
let repo = clone(repository_url, into_dir, get_credentials)?;
|
||||
|
||||
let oid = Oid::from_str(commit_id)?;
|
||||
let commit = match repo.find_commit(oid) {
|
||||
Ok(commit) => commit,
|
||||
Err(_) => return Ok(None),
|
||||
};
|
||||
|
||||
Ok(commit.parent_ids().next().map(|x| x.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::git::{checkout, clone, clone_at_commit};
|
||||
use crate::git::{checkout, clone, clone_at_commit, get_parent_commit_id};
|
||||
use git2::{Cred, CredentialType};
|
||||
|
||||
struct DirectoryToDelete<'a> {
|
||||
@@ -232,10 +259,45 @@ mod tests {
|
||||
let commit = "9a9c1f4373c8128151a9def9ea3d838fa2ed33e8";
|
||||
assert_ne!(repo.head().unwrap().target().unwrap().to_string(), commit);
|
||||
let check = checkout(&repo, commit);
|
||||
assert!(matches!(check, Ok(())));
|
||||
assert!(matches!(check, Ok(_)));
|
||||
assert_eq!(repo.head().unwrap().target().unwrap().to_string(), commit);
|
||||
}
|
||||
|
||||
#[test]
fn test_git_parent_id() {
    // temp clone target, cleaned up when dropped
    let clone_dir = DirectoryToDelete {
        path: "/tmp/engine_test_parent_id",
    };

    // this commit of the public test repo has a single, known parent
    let parent_id = get_parent_commit_id(
        "https://github.com/Qovery/engine-testing.git",
        "964f02f3a3065bc7f6fb745d679b1ddb21153cc7",
        clone_dir.path,
        &|_| vec![],
    )
    .unwrap()
    .unwrap();

    assert_eq!(parent_id, "1538fb6333b86798f0cf865558a28e729a98dace");
}
|
||||
|
||||
#[test]
fn test_git_parent_id_not_existing() {
    // temp clone target, cleaned up when dropped
    let clone_dir = DirectoryToDelete {
        path: "/tmp/engine_test_parent_id_not_existing",
    };

    // commit id does not exist in the repo -> expect Ok(None), not an error
    let parent_id = get_parent_commit_id(
        "https://github.com/Qovery/engine-testing.git",
        "964f02f3a3065bc7f6fb745d679b1ddb21153cc0",
        clone_dir.path,
        &|_| vec![],
    )
    .unwrap();

    assert!(parent_id.is_none());
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_git_submodule_with_ssh_key() {
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::hash::Hash;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use git2::{Cred, CredentialType, Error};
|
||||
use itertools::Itertools;
|
||||
use rand::distributions::Alphanumeric;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
|
||||
use crate::build_platform::{Build, BuildOptions, Credentials, GitRepository, Image, SshKey};
|
||||
use crate::cloud_provider::aws::databases::mongodb::MongoDB;
|
||||
@@ -15,12 +22,8 @@ use crate::cloud_provider::service::{DatabaseOptions, StatefulService, Stateless
|
||||
use crate::cloud_provider::utilities::VersionsNumber;
|
||||
use crate::cloud_provider::CloudProvider;
|
||||
use crate::cloud_provider::Kind as CPKind;
|
||||
use itertools::Itertools;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use crate::git;
|
||||
use crate::utilities::get_image_tag;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct QoveryIdentifier {
|
||||
@@ -347,24 +350,42 @@ impl Application {
|
||||
}
|
||||
|
||||
pub fn to_image(&self) -> Image {
|
||||
// Image tag == hash(root_path) + commit_id truncate to 127 char
|
||||
// https://github.com/distribution/distribution/blob/6affafd1f030087d88f88841bf66a8abe2bf4d24/reference/regexp.go#L41
|
||||
let mut hasher = DefaultHasher::new();
|
||||
self.to_image_with_commit(&self.commit_id)
|
||||
}
|
||||
|
||||
// If any of those variables changes, we'll get a new hash value, what results in a new image
|
||||
// build and avoids using cache. It is important to build a new image, as those variables may
|
||||
// affect the build result even if user didn't change his code.
|
||||
self.root_path.hash(&mut hasher);
|
||||
self.dockerfile_path.hash(&mut hasher);
|
||||
self.environment_vars.hash(&mut hasher);
|
||||
pub fn to_image_from_parent_commit<P>(&self, clone_repo_into_dir: P) -> Result<Option<Image>, Error>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let parent_commit_id = git::get_parent_commit_id(
|
||||
self.git_url.as_str(),
|
||||
self.commit_id.as_str(),
|
||||
clone_repo_into_dir,
|
||||
&|_| match &self.git_credentials {
|
||||
None => vec![],
|
||||
Some(creds) => vec![(
|
||||
CredentialType::USER_PASS_PLAINTEXT,
|
||||
Cred::userpass_plaintext(creds.login.as_str(), creds.access_token.as_str()).unwrap(),
|
||||
)],
|
||||
},
|
||||
)?;
|
||||
|
||||
let mut tag = format!("{}-{}", hasher.finish(), self.commit_id);
|
||||
tag.truncate(127);
|
||||
Ok(match parent_commit_id {
|
||||
Some(id) => Some(self.to_image_with_commit(&id)),
|
||||
None => None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_image_with_commit(&self, commit_id: &String) -> Image {
|
||||
Image {
|
||||
application_id: self.id.clone(),
|
||||
name: self.name.clone(),
|
||||
tag,
|
||||
tag: get_image_tag(
|
||||
&self.root_path,
|
||||
&self.dockerfile_path,
|
||||
&self.environment_vars,
|
||||
commit_id,
|
||||
),
|
||||
commit_id: self.commit_id.clone(),
|
||||
registry_name: None,
|
||||
registry_secret: None,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::thread;
|
||||
|
||||
use crate::build_platform::BuildResult;
|
||||
use crate::build_platform::{BuildResult, CacheResult};
|
||||
use crate::cloud_provider::kubernetes::Kubernetes;
|
||||
use crate::cloud_provider::service::{Application, Service};
|
||||
use crate::container_registry::PushResult;
|
||||
@@ -134,7 +134,53 @@ impl<'a> Transaction<'a> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn _build_applications(
|
||||
fn load_build_app_cache(&self, app: &crate::models::Application) -> Result<(), EngineError> {
|
||||
// do load build cache before building app
|
||||
let build = app.to_build();
|
||||
let _ = match self.engine.build_platform().has_cache(&build) {
|
||||
Ok(CacheResult::MissWithoutParentBuild) => {
|
||||
info!("first build for app {} - cache miss", app.name.as_str());
|
||||
}
|
||||
Ok(CacheResult::Hit) => {
|
||||
info!("cache hit for app {}", app.name.as_str());
|
||||
}
|
||||
Ok(CacheResult::Miss(parent_build)) => {
|
||||
info!("cache miss for app {}", app.name.as_str());
|
||||
|
||||
let container_registry = self.engine.container_registry();
|
||||
|
||||
// pull image from container registry
|
||||
// FIXME: if one day we use something else than LocalDocker to build image
|
||||
// FIXME: we'll need to send the PullResult to the Build implementation
|
||||
let _ = match container_registry.pull(&parent_build.image) {
|
||||
Ok(pull_result) => pull_result,
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"{}",
|
||||
err.message.clone().unwrap_or(format!(
|
||||
"something goes wrong while pulling image from {:?} container registry",
|
||||
container_registry.kind()
|
||||
))
|
||||
);
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"load build app {} cache error: {}",
|
||||
app.name.as_str(),
|
||||
err.message.clone().unwrap_or("<no message>".to_string())
|
||||
);
|
||||
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_applications(
|
||||
&self,
|
||||
environment: &Environment,
|
||||
option: &DeploymentOption,
|
||||
@@ -151,6 +197,9 @@ impl<'a> Transaction<'a> {
|
||||
let image = app.to_image();
|
||||
let build_result = if option.force_build || !self.engine.container_registry().does_image_exists(&image)
|
||||
{
|
||||
// If an error occurred we can skip it. It's not critical.
|
||||
let _ = self.load_build_app_cache(app);
|
||||
|
||||
// only if the build is forced OR if the image does not exist in the registry
|
||||
self.engine.build_platform().build(app.to_build(), option.force_build)
|
||||
} else {
|
||||
@@ -185,7 +234,7 @@ impl<'a> Transaction<'a> {
|
||||
Ok(applications)
|
||||
}
|
||||
|
||||
fn _push_applications(
|
||||
fn push_applications(
|
||||
&self,
|
||||
applications: Vec<Box<dyn Application>>,
|
||||
option: &DeploymentOption,
|
||||
@@ -393,8 +442,8 @@ impl<'a> Transaction<'a> {
|
||||
EnvironmentAction::EnvironmentWithFailover(te, _) => te,
|
||||
};
|
||||
|
||||
let apps_result = match self._build_applications(target_environment, option) {
|
||||
Ok(applications) => match self._push_applications(applications, option) {
|
||||
let apps_result = match self.build_applications(target_environment, option) {
|
||||
Ok(applications) => match self.push_applications(applications, option) {
|
||||
Ok(results) => {
|
||||
let applications = results.into_iter().map(|(app, _)| app).collect::<Vec<_>>();
|
||||
|
||||
@@ -423,8 +472,8 @@ impl<'a> Transaction<'a> {
|
||||
|
||||
// build as well the failover environment, retention could remove the application image
|
||||
if let EnvironmentAction::EnvironmentWithFailover(_, fe) = environment_action {
|
||||
let apps_result = match self._build_applications(fe, option) {
|
||||
Ok(applications) => match self._push_applications(applications, option) {
|
||||
let apps_result = match self.build_applications(fe, option) {
|
||||
Ok(applications) => match self.push_applications(applications, option) {
|
||||
Ok(results) => {
|
||||
let applications = results.into_iter().map(|(app, _)| app).collect::<Vec<_>>();
|
||||
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::collections::BTreeMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use reqwest::header;
|
||||
use reqwest::header::{HeaderMap, HeaderValue};
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
// generate the right header for digital ocean with token
|
||||
pub fn get_header_with_bearer(token: &str) -> HeaderMap<HeaderValue> {
|
||||
@@ -16,3 +18,95 @@ pub fn calculate_hash<T: Hash>(t: &T) -> u64 {
|
||||
t.hash(&mut s);
|
||||
s.finish()
|
||||
}
|
||||
|
||||
/// Computes a deterministic docker image tag of the form `<hash>-<commit_id>`,
/// truncated to 127 characters per the docker reference grammar:
/// https://github.com/distribution/distribution/blob/6affafd1f030087d88f88841bf66a8abe2bf4d24/reference/regexp.go#L41
///
/// Takes `&str` instead of `&String` (clippy `ptr_arg`); `String` hashes
/// identically to `str`, and existing `&String` call sites coerce, so the tag
/// values and all callers are unchanged.
pub fn get_image_tag(
    root_path: &str,
    dockerfile_path: &Option<String>,
    environment_variables: &BTreeMap<String, String>,
    commit_id: &str,
) -> String {
    let mut hasher = DefaultHasher::new();

    // If any of those variables changes, we'll get a new hash value, which results
    // in a new image build and avoids using a stale cache. It is important to build
    // a new image, as those variables may affect the build result even if the user
    // didn't change their code.
    root_path.hash(&mut hasher);

    if dockerfile_path.is_some() {
        // only used when a Dockerfile is present, to prevent a build cache miss every
        // single time we redeploy an app with an env var change when using Buildpacks
        dockerfile_path.hash(&mut hasher);

        // TODO check if the environment variables are used in the Dockerfile and only hash the ones that are used
        environment_variables.hash(&mut hasher);
    }

    let mut tag = format!("{}-{}", hasher.finish(), commit_id);
    tag.truncate(127);

    tag
}
|
||||
|
||||
#[cfg(test)]
mod tests_utilities {
    use crate::utilities::get_image_tag;
    use std::collections::BTreeMap;

    #[test]
    fn test_get_image_tag() {
        let commit = "63d8c437337416a7067d3f358197ac47d003fab9".to_string();
        let no_vars: BTreeMap<String, String> = BTreeMap::new();

        // a different Dockerfile path must yield a different tag
        let tag_dockerfile = get_image_tag(&"/".to_string(), &Some("Dockerfile".to_string()), &no_vars, &commit);
        let tag_dockerfile_qovery =
            get_image_tag(&"/".to_string(), &Some("Dockerfile.qovery".to_string()), &no_vars, &commit);
        assert_ne!(tag_dockerfile, tag_dockerfile_qovery);

        // a different root path must yield a different tag
        let tag_other_root =
            get_image_tag(&"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &no_vars, &commit);
        assert_ne!(tag_dockerfile, tag_other_root);

        // identical inputs must produce the same tag (determinism)
        let tag_other_root_again =
            get_image_tag(&"/xxx".to_string(), &Some("Dockerfile.qovery".to_string()), &no_vars, &commit);
        assert_eq!(tag_other_root, tag_other_root_again);

        // without a Dockerfile, environment variables must not influence the tag
        let tag_no_dockerfile = get_image_tag(&"/".to_string(), &None, &no_vars, &commit);
        let mut vars = BTreeMap::new();
        vars.insert("toto".to_string(), "key".to_string());
        let tag_no_dockerfile_with_vars = get_image_tag(&"/".to_string(), &None, &vars, &commit);
        assert_eq!(tag_no_dockerfile, tag_no_dockerfile_with_vars);
    }
}
|
||||
|
||||
@@ -32,12 +32,15 @@ use qovery_engine::cloud_provider::scaleway::application::ScwZone;
|
||||
use qovery_engine::cloud_provider::scaleway::kubernetes::Kapsule;
|
||||
use qovery_engine::cloud_provider::scaleway::Scaleway;
|
||||
use qovery_engine::cloud_provider::{CloudProvider, Kind};
|
||||
use qovery_engine::cmd::kubectl::kubernetes_get_all_hpas;
|
||||
use qovery_engine::cmd::structs::SVCItem;
|
||||
use qovery_engine::engine::Engine;
|
||||
use qovery_engine::error::{SimpleError, SimpleErrorKind};
|
||||
use qovery_engine::logger::Logger;
|
||||
use qovery_engine::models::DatabaseMode::CONTAINER;
|
||||
use qovery_engine::transaction::DeploymentOption;
|
||||
use std::collections::BTreeMap;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use tracing::{span, Level};
|
||||
|
||||
@@ -217,7 +220,6 @@ pub fn environment_3_apps_3_routers_3_databases(
|
||||
let version_mongo = "4.4";
|
||||
|
||||
// pSQL 1 management part
|
||||
let fqdn_id = "my-postgresql-".to_string() + generate_id().as_str();
|
||||
let fqdn = get_svc_name(DatabaseKind::Postgresql, provider_kind.clone()).to_string();
|
||||
let database_port = 5432;
|
||||
let database_username = "superuser".to_string();
|
||||
@@ -225,7 +227,6 @@ pub fn environment_3_apps_3_routers_3_databases(
|
||||
let database_name = "postgres".to_string();
|
||||
|
||||
// pSQL 2 management part
|
||||
let fqdn_id_2 = "my-postgresql-2".to_string() + generate_id().as_str();
|
||||
let fqdn_2 = format!("{}2", get_svc_name(DatabaseKind::Postgresql, provider_kind.clone()));
|
||||
let database_username_2 = "superuser2".to_string();
|
||||
let database_name_2 = "postgres2".to_string();
|
||||
@@ -427,7 +428,7 @@ pub fn environment_3_apps_3_routers_3_databases(
|
||||
id: generate_id(),
|
||||
name: database_name.clone(),
|
||||
version: "11.8.0".to_string(),
|
||||
fqdn_id: fqdn_id.clone(),
|
||||
fqdn_id: fqdn.clone(),
|
||||
fqdn: fqdn.clone(),
|
||||
port: database_port.clone(),
|
||||
username: database_username.clone(),
|
||||
@@ -449,7 +450,7 @@ pub fn environment_3_apps_3_routers_3_databases(
|
||||
id: generate_id(),
|
||||
name: database_name_2.clone(),
|
||||
version: "11.8.0".to_string(),
|
||||
fqdn_id: fqdn_id_2.clone(),
|
||||
fqdn_id: fqdn_2.clone(),
|
||||
fqdn: fqdn_2.clone(),
|
||||
port: database_port.clone(),
|
||||
username: database_username_2.clone(),
|
||||
@@ -471,7 +472,7 @@ pub fn environment_3_apps_3_routers_3_databases(
|
||||
id: generate_id(),
|
||||
name: database_db_name_mongo.clone(),
|
||||
version: version_mongo.to_string(),
|
||||
fqdn_id: "mongodb-".to_string() + generate_id().as_str(),
|
||||
fqdn_id: database_host_mongo.clone(),
|
||||
fqdn: database_host_mongo.clone(),
|
||||
port: database_port_mongo.clone(),
|
||||
username: database_username_mongo.clone(),
|
||||
@@ -574,13 +575,11 @@ pub fn environment_only_http_server_router_with_sticky_session(context: &Context
|
||||
|
||||
pub fn environnement_2_app_2_routers_1_psql(
|
||||
context: &Context,
|
||||
|
||||
test_domain: &str,
|
||||
database_instance_type: &str,
|
||||
database_disk_type: &str,
|
||||
provider_kind: Kind,
|
||||
) -> Environment {
|
||||
let fqdn_id = "my-postgresql-".to_string() + generate_id().as_str();
|
||||
let fqdn = get_svc_name(DatabaseKind::Postgresql, provider_kind.clone()).to_string();
|
||||
|
||||
let database_port = 5432;
|
||||
@@ -605,7 +604,7 @@ pub fn environnement_2_app_2_routers_1_psql(
|
||||
id: generate_id(),
|
||||
name: database_name.clone(),
|
||||
version: "11.8.0".to_string(),
|
||||
fqdn_id: fqdn_id.clone(),
|
||||
fqdn_id: fqdn.clone(),
|
||||
fqdn: fqdn.clone(),
|
||||
port: database_port.clone(),
|
||||
username: database_username.clone(),
|
||||
@@ -995,44 +994,42 @@ pub fn test_db(
|
||||
let database_username = "superuser".to_string();
|
||||
let database_password = generate_id();
|
||||
let db_kind_str = db_kind.name().to_string();
|
||||
let database_host = format!(
|
||||
"{}-{}.{}.{}",
|
||||
db_kind_str.clone(),
|
||||
generate_id(),
|
||||
let db_id = generate_id();
|
||||
let database_host = format!("{}-{}", db_id, db_kind_str.clone());
|
||||
let database_fqdn = format!(
|
||||
"{}.{}.{}",
|
||||
database_host,
|
||||
context.cluster_id(),
|
||||
secrets
|
||||
.clone()
|
||||
.DEFAULT_TEST_DOMAIN
|
||||
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
|
||||
);
|
||||
let dyn_db_fqdn = match is_public.clone() {
|
||||
true => database_host.clone(),
|
||||
false => match database_mode.clone() {
|
||||
DatabaseMode::MANAGED => format!("{}-dns", app_id.clone()),
|
||||
DatabaseMode::CONTAINER => get_svc_name(db_kind.clone(), provider_kind.clone()).to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
let db_infos = get_db_infos(
|
||||
db_kind.clone(),
|
||||
db_id.clone(),
|
||||
database_mode.clone(),
|
||||
database_username.clone(),
|
||||
database_password.clone(),
|
||||
dyn_db_fqdn.clone(),
|
||||
if is_public {
|
||||
database_fqdn.clone()
|
||||
} else {
|
||||
database_host.clone()
|
||||
},
|
||||
);
|
||||
let database_port = db_infos.db_port.clone();
|
||||
let database_db_name = db_infos.db_name.clone();
|
||||
let storage_size = 10;
|
||||
let db_disk_type = db_disk_type(provider_kind.clone(), database_mode.clone());
|
||||
let db_instance_type = db_instance_type(provider_kind.clone(), db_kind.clone(), database_mode.clone());
|
||||
let mut db = Database {
|
||||
kind: db_kind.clone(),
|
||||
action: Action::Create,
|
||||
id: app_id.clone(),
|
||||
name: database_db_name.clone(),
|
||||
id: db_id.clone(),
|
||||
name: db_id.clone(),
|
||||
version: version.to_string(),
|
||||
fqdn_id: format!("{}-{}", db_kind_str.clone(), generate_id()),
|
||||
fqdn: database_host.clone(),
|
||||
fqdn_id: database_host.clone(),
|
||||
fqdn: database_fqdn.clone(),
|
||||
port: database_port.clone(),
|
||||
username: database_username.clone(),
|
||||
password: database_password.clone(),
|
||||
@@ -1055,6 +1052,8 @@ pub fn test_db(
|
||||
.applications
|
||||
.into_iter()
|
||||
.map(|mut app| {
|
||||
app.id = app_id.clone();
|
||||
app.name = app_id.clone();
|
||||
app.branch = app_name.clone();
|
||||
app.commit_id = db_infos.app_commit.clone();
|
||||
app.ports = vec![Port {
|
||||
@@ -1071,7 +1070,7 @@ pub fn test_db(
|
||||
app
|
||||
})
|
||||
.collect::<Vec<qovery_engine::models::Application>>();
|
||||
environment.routers[0].routes[0].application_name = app_name.clone();
|
||||
environment.routers[0].routes[0].application_name = app_id.clone();
|
||||
|
||||
let mut environment_delete = environment.clone();
|
||||
environment_delete.action = Action::Delete;
|
||||
@@ -1228,11 +1227,7 @@ fn db_unit_tests(
|
||||
svc.items
|
||||
.expect("No items in svc")
|
||||
.into_iter()
|
||||
.filter(|svc| svc
|
||||
.metadata
|
||||
.name
|
||||
.contains(get_svc_name(db_kind.clone(), provider_kind.clone()))
|
||||
&& &svc.spec.svc_type == "LoadBalancer")
|
||||
.filter(|svc| svc.metadata.name == database_host && &svc.spec.svc_type == "LoadBalancer")
|
||||
.collect::<Vec<SVCItem>>()
|
||||
.len(),
|
||||
match is_public {
|
||||
@@ -1250,17 +1245,14 @@ fn db_unit_tests(
|
||||
.items
|
||||
.expect("No items in svc")
|
||||
.into_iter()
|
||||
.filter(|svc| {
|
||||
svc.metadata.name.contains(format!("{}-dns", app_id.clone()).as_str())
|
||||
&& svc.spec.svc_type == "ExternalName"
|
||||
})
|
||||
.filter(|svc| svc.metadata.name == database_host && svc.spec.svc_type == "ExternalName")
|
||||
.collect::<Vec<SVCItem>>();
|
||||
let annotations = &service[0].metadata.annotations;
|
||||
assert_eq!(service.len(), 1);
|
||||
match is_public {
|
||||
true => {
|
||||
assert!(annotations.contains_key("external-dns.alpha.kubernetes.io/hostname"));
|
||||
assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_host);
|
||||
assert_eq!(annotations["external-dns.alpha.kubernetes.io/hostname"], database_fqdn);
|
||||
}
|
||||
false => assert!(!annotations.contains_key("external-dns.alpha.kubernetes.io/hostname")),
|
||||
}
|
||||
@@ -1531,6 +1523,15 @@ pub fn cluster_test(
|
||||
};
|
||||
}
|
||||
|
||||
if let Err(err) = metrics_server_test(
|
||||
kubernetes
|
||||
.get_kubeconfig_file_path()
|
||||
.expect("Unable to get config file path"),
|
||||
kubernetes.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
panic!("{:?}", err)
|
||||
}
|
||||
|
||||
match test_type {
|
||||
ClusterTestType::Classic => {}
|
||||
ClusterTestType::WithPause => {
|
||||
@@ -1556,6 +1557,15 @@ pub fn cluster_test(
|
||||
TransactionResult::Rollback(_) => assert!(false),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
|
||||
if let Err(err) = metrics_server_test(
|
||||
kubernetes
|
||||
.get_kubeconfig_file_path()
|
||||
.expect("Unable to get config file path"),
|
||||
kubernetes.cloud_provider().credentials_environment_variables(),
|
||||
) {
|
||||
panic!("{:?}", err)
|
||||
}
|
||||
}
|
||||
ClusterTestType::WithUpgrade => {
|
||||
let upgrade_to_version = format!("{}.{}", major_boot_version, minor_boot_version.clone() + 1);
|
||||
@@ -1586,6 +1596,19 @@ pub fn cluster_test(
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
|
||||
if let Err(err) = metrics_server_test(
|
||||
upgraded_kubernetes
|
||||
.as_ref()
|
||||
.get_kubeconfig_file_path()
|
||||
.expect("Unable to get config file path"),
|
||||
upgraded_kubernetes
|
||||
.as_ref()
|
||||
.cloud_provider()
|
||||
.credentials_environment_variables(),
|
||||
) {
|
||||
panic!("{:?}", err)
|
||||
}
|
||||
|
||||
// Delete
|
||||
if let Err(err) = delete_tx.delete_kubernetes(upgraded_kubernetes.as_ref()) {
|
||||
panic!("{:?}", err)
|
||||
@@ -1627,3 +1650,32 @@ pub fn cluster_test(
|
||||
|
||||
test_name.to_string()
|
||||
}
|
||||
|
||||
pub fn metrics_server_test<P>(kubernetes_config: P, envs: Vec<(&str, &str)>) -> Result<(), SimpleError>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
let result = kubernetes_get_all_hpas(kubernetes_config, envs, None);
|
||||
|
||||
match result {
|
||||
Ok(hpas) => {
|
||||
for hpa in hpas.items.expect("No hpa item").into_iter() {
|
||||
if !hpa
|
||||
.metadata
|
||||
.annotations
|
||||
.expect("No hpa annotation.")
|
||||
.conditions
|
||||
.expect("No hpa condition.")
|
||||
.contains("ValidMetricFound")
|
||||
{
|
||||
return Err(SimpleError {
|
||||
kind: SimpleErrorKind::Other,
|
||||
message: Some("Metrics server doesn't work".to_string()),
|
||||
});
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -979,6 +979,7 @@ pub struct DBInfos {
|
||||
|
||||
pub fn get_db_infos(
|
||||
db_kind: DatabaseKind,
|
||||
db_id: String,
|
||||
database_mode: DatabaseMode,
|
||||
database_username: String,
|
||||
database_password: String,
|
||||
@@ -987,7 +988,7 @@ pub fn get_db_infos(
|
||||
match db_kind {
|
||||
DatabaseKind::Mongodb => {
|
||||
let database_port = 27017;
|
||||
let database_db_name = "my-mongodb".to_string();
|
||||
let database_db_name = db_id;
|
||||
let database_uri = format!(
|
||||
"mongodb://{}:{}@{}:{}/{}",
|
||||
database_username,
|
||||
@@ -1013,7 +1014,7 @@ pub fn get_db_infos(
|
||||
}
|
||||
DatabaseKind::Mysql => {
|
||||
let database_port = 3306;
|
||||
let database_db_name = "mysqldatabase".to_string();
|
||||
let database_db_name = db_id;
|
||||
DBInfos {
|
||||
db_port: database_port.clone(),
|
||||
db_name: database_db_name.to_string(),
|
||||
@@ -1029,7 +1030,11 @@ pub fn get_db_infos(
|
||||
}
|
||||
DatabaseKind::Postgresql => {
|
||||
let database_port = 5432;
|
||||
let database_db_name = "postgres".to_string();
|
||||
let database_db_name = if database_mode == MANAGED {
|
||||
"postgres".to_string()
|
||||
} else {
|
||||
db_id
|
||||
};
|
||||
DBInfos {
|
||||
db_port: database_port.clone(),
|
||||
db_name: database_db_name.to_string(),
|
||||
@@ -1045,7 +1050,7 @@ pub fn get_db_infos(
|
||||
}
|
||||
DatabaseKind::Redis => {
|
||||
let database_port = 6379;
|
||||
let database_db_name = "my-redis".to_string();
|
||||
let database_db_name = db_id;
|
||||
DBInfos {
|
||||
db_port: database_port.clone(),
|
||||
db_name: database_db_name.to_string(),
|
||||
|
||||
@@ -157,122 +157,6 @@ fn deploy_an_environment_with_db_and_pause_it() {
|
||||
})
|
||||
}
|
||||
|
||||
// this test ensure containers databases are never restarted, even in failover environment case
|
||||
#[cfg(feature = "test-aws-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
fn postgresql_failover_dev_environment_with_all_options() {
|
||||
let test_name = function_name!();
|
||||
engine_run_test(|| {
|
||||
init();
|
||||
|
||||
let span = span!(Level::INFO, "test", name = test_name);
|
||||
let _enter = span.enter();
|
||||
|
||||
let secrets = FuncTestsSecrets::new();
|
||||
let logger = logger();
|
||||
let context = context(
|
||||
secrets
|
||||
.AWS_TEST_ORGANIZATION_ID
|
||||
.as_ref()
|
||||
.expect("AWS_TEST_ORGANIZATION_ID is not set")
|
||||
.as_str(),
|
||||
secrets
|
||||
.AWS_TEST_CLUSTER_ID
|
||||
.as_ref()
|
||||
.expect("AWS_TEST_CLUSTER_ID is not set")
|
||||
.as_str(),
|
||||
);
|
||||
let context_for_deletion = context.clone_not_same_execution_id();
|
||||
let test_domain = secrets
|
||||
.clone()
|
||||
.DEFAULT_TEST_DOMAIN
|
||||
.expect("DEFAULT_TEST_DOMAIN is not set in secrets");
|
||||
|
||||
let environment = test_utilities::common::environnement_2_app_2_routers_1_psql(
|
||||
&context,
|
||||
test_domain.as_str(),
|
||||
AWS_DATABASE_INSTANCE_TYPE,
|
||||
AWS_DATABASE_DISK_TYPE,
|
||||
Kind::Aws,
|
||||
);
|
||||
let environment_check = environment.clone();
|
||||
let mut environment_never_up = environment.clone();
|
||||
// error in ports, these applications will never be up !!
|
||||
environment_never_up.applications = environment_never_up
|
||||
.applications
|
||||
.into_iter()
|
||||
.map(|mut app| {
|
||||
app.ports = vec![Port {
|
||||
id: "zdf7d6aad".to_string(),
|
||||
long_id: Default::default(),
|
||||
port: 4789,
|
||||
public_port: Some(443),
|
||||
name: None,
|
||||
publicly_accessible: true,
|
||||
protocol: Protocol::HTTP,
|
||||
}];
|
||||
app
|
||||
})
|
||||
.collect::<Vec<qovery_engine::models::Application>>();
|
||||
let mut environment_delete = test_utilities::common::environnement_2_app_2_routers_1_psql(
|
||||
&context_for_deletion,
|
||||
test_domain.as_str(),
|
||||
AWS_DATABASE_INSTANCE_TYPE,
|
||||
AWS_DATABASE_DISK_TYPE,
|
||||
Kind::Aws,
|
||||
);
|
||||
|
||||
environment_delete.action = Action::Delete;
|
||||
|
||||
let ea = EnvironmentAction::Environment(environment.clone());
|
||||
let ea_fail_ok = EnvironmentAction::EnvironmentWithFailover(environment_never_up.clone(), environment.clone());
|
||||
let ea_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
|
||||
|
||||
match environment.deploy_environment(Kind::Aws, &context, &ea, logger.clone()) {
|
||||
TransactionResult::Ok => assert!(true),
|
||||
TransactionResult::Rollback(_) => assert!(false),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
|
||||
let database_name = format!("postgresql{}-0", &environment_check.databases[0].name);
|
||||
match is_pod_restarted_env(
|
||||
context.clone(),
|
||||
Kind::Aws,
|
||||
environment_check.clone(),
|
||||
database_name.as_str(),
|
||||
secrets.clone(),
|
||||
) {
|
||||
(true, _) => assert!(true),
|
||||
(false, _) => assert!(false),
|
||||
}
|
||||
match environment_never_up.deploy_environment(Kind::Aws, &context, &ea_fail_ok, logger.clone()) {
|
||||
TransactionResult::Ok => assert!(false),
|
||||
TransactionResult::Rollback(_) => assert!(true),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY EVEN IF FAIL
|
||||
match is_pod_restarted_env(
|
||||
context.clone(),
|
||||
Kind::Aws,
|
||||
environment_check.clone(),
|
||||
database_name.as_str(),
|
||||
secrets.clone(),
|
||||
) {
|
||||
(true, _) => assert!(true),
|
||||
(false, _) => assert!(false),
|
||||
}
|
||||
|
||||
match environment_delete.delete_environment(Kind::Aws, &context_for_deletion, &ea_for_deletion, logger) {
|
||||
TransactionResult::Ok => assert!(true),
|
||||
TransactionResult::Rollback(_) => assert!(false),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
|
||||
return test_name.to_string();
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure a full environment can run correctly
|
||||
#[cfg(feature = "test-aws-self-hosted")]
|
||||
#[named]
|
||||
@@ -398,7 +282,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
|
||||
id: generate_id(),
|
||||
name: database_db_name.clone(),
|
||||
version: "11.8.0".to_string(),
|
||||
fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
|
||||
fqdn_id: database_host.clone(),
|
||||
fqdn: database_host.clone(),
|
||||
port: database_port,
|
||||
username: database_username.clone(),
|
||||
|
||||
@@ -5,12 +5,17 @@ use self::test_utilities::utilities::{
|
||||
engine_run_test, generate_id, get_pods, get_pvc, is_pod_restarted_env, logger, FuncTestsSecrets,
|
||||
};
|
||||
use ::function_name::named;
|
||||
use qovery_engine::build_platform::{BuildPlatform, BuildResult, CacheResult};
|
||||
use qovery_engine::cloud_provider::Kind;
|
||||
use qovery_engine::cmd::kubectl::kubernetes_get_all_pdbs;
|
||||
use qovery_engine::container_registry::{ContainerRegistry, PullResult, PushResult};
|
||||
use qovery_engine::error::EngineError;
|
||||
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType};
|
||||
use qovery_engine::transaction::TransactionResult;
|
||||
use std::collections::BTreeMap;
|
||||
use test_utilities::utilities::{context, init, kubernetes_config_path};
|
||||
use std::time::SystemTime;
|
||||
use test_utilities::aws::container_registry_ecr;
|
||||
use test_utilities::utilities::{build_platform_local_docker, context, init, kubernetes_config_path};
|
||||
use tracing::{span, Level};
|
||||
|
||||
// TODO:
|
||||
@@ -74,6 +79,97 @@ fn deploy_a_working_environment_with_no_router_on_aws_eks() {
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-aws-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
fn test_build_cache() {
|
||||
let test_name = function_name!();
|
||||
engine_run_test(|| {
|
||||
init();
|
||||
let span = span!(Level::INFO, "test", name = test_name);
|
||||
let _enter = span.enter();
|
||||
|
||||
let logger = logger();
|
||||
let secrets = FuncTestsSecrets::new();
|
||||
let context = context(
|
||||
secrets
|
||||
.AWS_TEST_ORGANIZATION_ID
|
||||
.as_ref()
|
||||
.expect("AWS_TEST_ORGANIZATION_ID is not set")
|
||||
.as_str(),
|
||||
secrets
|
||||
.AWS_TEST_CLUSTER_ID
|
||||
.as_ref()
|
||||
.expect("AWS_TEST_CLUSTER_ID is not set")
|
||||
.as_str(),
|
||||
);
|
||||
|
||||
let mut environment = test_utilities::common::working_minimal_environment(
|
||||
&context,
|
||||
secrets
|
||||
.DEFAULT_TEST_DOMAIN
|
||||
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
|
||||
.as_str(),
|
||||
);
|
||||
|
||||
let ecr = container_registry_ecr(&context);
|
||||
let local_docker = build_platform_local_docker(&context);
|
||||
let app = environment.applications.first().unwrap();
|
||||
let image = app.to_image();
|
||||
|
||||
let app_build = app.to_build();
|
||||
let _ = match local_docker.has_cache(&app_build) {
|
||||
Ok(CacheResult::Hit) => assert!(false),
|
||||
Ok(CacheResult::Miss(parent_build)) => assert!(true),
|
||||
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
|
||||
Err(err) => assert!(false),
|
||||
};
|
||||
|
||||
let _ = match ecr.pull(&image).unwrap() {
|
||||
PullResult::Some(_) => assert!(false),
|
||||
PullResult::None => assert!(true),
|
||||
};
|
||||
|
||||
let build_result = local_docker.build(app.to_build(), false).unwrap();
|
||||
|
||||
let _ = match ecr.push(&build_result.build.image, false) {
|
||||
Ok(_) => assert!(true),
|
||||
Err(_) => assert!(false),
|
||||
};
|
||||
|
||||
// TODO clean local docker cache
|
||||
|
||||
let start_pull_time = SystemTime::now();
|
||||
let _ = match ecr.pull(&build_result.build.image).unwrap() {
|
||||
PullResult::Some(_) => assert!(true),
|
||||
PullResult::None => assert!(false),
|
||||
};
|
||||
|
||||
let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap();
|
||||
|
||||
let _ = match local_docker.has_cache(&build_result.build) {
|
||||
Ok(CacheResult::Hit) => assert!(true),
|
||||
Ok(CacheResult::Miss(parent_build)) => assert!(false),
|
||||
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
|
||||
Err(err) => assert!(false),
|
||||
};
|
||||
|
||||
let start_pull_time = SystemTime::now();
|
||||
let _ = match ecr.pull(&image).unwrap() {
|
||||
PullResult::Some(_) => assert!(true),
|
||||
PullResult::None => assert!(false),
|
||||
};
|
||||
|
||||
let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap();
|
||||
|
||||
if pull_duration_2.as_millis() > pull_duration.as_millis() {
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
return test_name.to_string();
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-aws-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
@@ -173,6 +269,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
|
||||
.as_str(),
|
||||
),
|
||||
],
|
||||
None,
|
||||
);
|
||||
for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") {
|
||||
assert_eq!(pdb.metadata.name.contains(&environment.applications[0].name), false)
|
||||
@@ -224,6 +321,7 @@ fn deploy_a_working_environment_and_pause_it_eks() {
|
||||
.as_str(),
|
||||
),
|
||||
],
|
||||
None,
|
||||
);
|
||||
let mut filtered_pdb = false;
|
||||
for pdb in pdbs.expect("Unable to get pdbs").items.expect("Unable to get pdbs") {
|
||||
|
||||
@@ -166,126 +166,6 @@ fn deploy_an_environment_with_db_and_pause_it() {
|
||||
})
|
||||
}
|
||||
|
||||
// this test ensure containers databases are never restarted, even in failover environment case
|
||||
#[cfg(feature = "test-do-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
fn postgresql_failover_dev_environment_with_all_options() {
|
||||
let test_name = function_name!();
|
||||
engine_run_test(|| {
|
||||
init();
|
||||
|
||||
let span = span!(Level::INFO, "test", name = test_name);
|
||||
let _enter = span.enter();
|
||||
|
||||
let secrets = FuncTestsSecrets::new();
|
||||
let logger = logger();
|
||||
let context = context(
|
||||
secrets
|
||||
.DIGITAL_OCEAN_TEST_ORGANIZATION_ID
|
||||
.as_ref()
|
||||
.expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"),
|
||||
secrets
|
||||
.DIGITAL_OCEAN_TEST_CLUSTER_ID
|
||||
.as_ref()
|
||||
.expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"),
|
||||
);
|
||||
let context_for_deletion = context.clone_not_same_execution_id();
|
||||
let test_domain = secrets
|
||||
.clone()
|
||||
.DEFAULT_TEST_DOMAIN
|
||||
.expect("DEFAULT_TEST_DOMAIN is not set in secrets");
|
||||
|
||||
let environment = test_utilities::common::environnement_2_app_2_routers_1_psql(
|
||||
&context,
|
||||
test_domain.as_str(),
|
||||
DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
|
||||
DO_SELF_HOSTED_DATABASE_DISK_TYPE,
|
||||
Kind::Do,
|
||||
);
|
||||
let environment_check = environment.clone();
|
||||
let mut environment_never_up = environment.clone();
|
||||
// error in ports, these applications will never be up !!
|
||||
environment_never_up.applications = environment_never_up
|
||||
.applications
|
||||
.into_iter()
|
||||
.map(|mut app| {
|
||||
app.ports = vec![Port {
|
||||
id: "zdf7d6aad".to_string(),
|
||||
long_id: Default::default(),
|
||||
port: 4789,
|
||||
public_port: Some(443),
|
||||
name: None,
|
||||
publicly_accessible: true,
|
||||
protocol: Protocol::HTTP,
|
||||
}];
|
||||
app
|
||||
})
|
||||
.collect::<Vec<qovery_engine::models::Application>>();
|
||||
let mut environment_delete = test_utilities::common::environnement_2_app_2_routers_1_psql(
|
||||
&context_for_deletion,
|
||||
test_domain.as_str(),
|
||||
DO_SELF_HOSTED_DATABASE_INSTANCE_TYPE,
|
||||
DO_SELF_HOSTED_DATABASE_DISK_TYPE,
|
||||
Kind::Do,
|
||||
);
|
||||
|
||||
environment_delete.action = Action::Delete;
|
||||
|
||||
let env_action = EnvironmentAction::Environment(environment.clone());
|
||||
let env_action_fail_ok =
|
||||
EnvironmentAction::EnvironmentWithFailover(environment_never_up.clone(), environment.clone());
|
||||
let env_action_for_deletion = EnvironmentAction::Environment(environment_delete.clone());
|
||||
|
||||
match environment.deploy_environment(Kind::Do, &context, &env_action, logger.clone()) {
|
||||
TransactionResult::Ok => assert!(true),
|
||||
TransactionResult::Rollback(_) => assert!(false),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY
|
||||
let database_name = format!("postgresql-{}-0", &environment_check.databases[0].name);
|
||||
match is_pod_restarted_env(
|
||||
context.clone(),
|
||||
ProviderKind::Do,
|
||||
environment_check.clone(),
|
||||
database_name.as_str(),
|
||||
secrets.clone(),
|
||||
) {
|
||||
(true, _) => assert!(true),
|
||||
(false, _) => assert!(false),
|
||||
}
|
||||
match environment_never_up.deploy_environment(Kind::Do, &context, &env_action_fail_ok, logger.clone()) {
|
||||
TransactionResult::Ok => assert!(false),
|
||||
TransactionResult::Rollback(_) => assert!(true),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
// TO CHECK: DATABASE SHOULDN'T BE RESTARTED AFTER A REDEPLOY EVEN IF FAIL
|
||||
match is_pod_restarted_env(
|
||||
context.clone(),
|
||||
ProviderKind::Do,
|
||||
environment_check.clone(),
|
||||
database_name.as_str(),
|
||||
secrets.clone(),
|
||||
) {
|
||||
(true, _) => assert!(true),
|
||||
(false, _) => assert!(false),
|
||||
}
|
||||
|
||||
match environment_delete.delete_environment(Kind::Do, &context_for_deletion, &env_action_for_deletion, logger) {
|
||||
TransactionResult::Ok => assert!(true),
|
||||
TransactionResult::Rollback(_) => assert!(false),
|
||||
TransactionResult::UnrecoverableError(_, _) => assert!(false),
|
||||
};
|
||||
|
||||
// delete images created during test from registries
|
||||
if let Err(e) = clean_environments(&context, vec![environment, environment_delete], secrets, DO_TEST_REGION) {
|
||||
warn!("cannot clean environments, error: {:?}", e);
|
||||
}
|
||||
|
||||
return test_name.to_string();
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure a full environment can run correctly
|
||||
#[cfg(feature = "test-do-self-hosted")]
|
||||
#[named]
|
||||
@@ -418,7 +298,7 @@ fn postgresql_deploy_a_working_environment_and_redeploy() {
|
||||
id: generate_id(),
|
||||
name: database_db_name.clone(),
|
||||
version: "11.8.0".to_string(),
|
||||
fqdn_id: "postgresql-".to_string() + generate_id().as_str(),
|
||||
fqdn_id: database_host.clone(),
|
||||
fqdn: database_host.clone(),
|
||||
port: database_port,
|
||||
username: database_username.clone(),
|
||||
|
||||
@@ -6,12 +6,16 @@ use self::test_utilities::utilities::{
|
||||
engine_run_test, generate_id, get_pods, get_pvc, init, is_pod_restarted_env, logger, FuncTestsSecrets,
|
||||
};
|
||||
use ::function_name::named;
|
||||
use qovery_engine::build_platform::{BuildPlatform, CacheResult};
|
||||
use qovery_engine::cloud_provider::Kind;
|
||||
use qovery_engine::container_registry::{ContainerRegistry, PullResult};
|
||||
use qovery_engine::models::{Action, Clone2, EnvironmentAction, Port, Protocol, Storage, StorageType};
|
||||
use qovery_engine::transaction::TransactionResult;
|
||||
use std::collections::BTreeMap;
|
||||
use std::time::SystemTime;
|
||||
use test_utilities::common::Infrastructure;
|
||||
use test_utilities::utilities::context;
|
||||
use test_utilities::digitalocean::container_registry_digital_ocean;
|
||||
use test_utilities::utilities::{build_platform_local_docker, context};
|
||||
use tracing::{span, warn, Level};
|
||||
|
||||
// Note: All those tests relies on a test cluster running on DigitalOcean infrastructure.
|
||||
@@ -78,6 +82,95 @@ fn digitalocean_doks_deploy_a_working_environment_with_no_router() {
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-do-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
fn test_build_cache() {
|
||||
let test_name = function_name!();
|
||||
engine_run_test(|| {
|
||||
init();
|
||||
let span = span!(Level::INFO, "test", name = test_name);
|
||||
let _enter = span.enter();
|
||||
|
||||
let logger = logger();
|
||||
let secrets = FuncTestsSecrets::new();
|
||||
let context = context(
|
||||
secrets
|
||||
.DIGITAL_OCEAN_TEST_ORGANIZATION_ID
|
||||
.as_ref()
|
||||
.expect("DIGITAL_OCEAN_TEST_ORGANIZATION_ID is not set"),
|
||||
secrets
|
||||
.DIGITAL_OCEAN_TEST_CLUSTER_ID
|
||||
.as_ref()
|
||||
.expect("DIGITAL_OCEAN_TEST_CLUSTER_ID is not set"),
|
||||
);
|
||||
|
||||
let mut environment = test_utilities::common::working_minimal_environment(
|
||||
&context,
|
||||
secrets
|
||||
.DEFAULT_TEST_DOMAIN
|
||||
.expect("DEFAULT_TEST_DOMAIN is not set in secrets")
|
||||
.as_str(),
|
||||
);
|
||||
|
||||
let docr = container_registry_digital_ocean(&context);
|
||||
let local_docker = build_platform_local_docker(&context);
|
||||
let app = environment.applications.first().unwrap();
|
||||
let image = app.to_image();
|
||||
|
||||
let app_build = app.to_build();
|
||||
let _ = match local_docker.has_cache(&app_build) {
|
||||
Ok(CacheResult::Hit) => assert!(false),
|
||||
Ok(CacheResult::Miss(parent_build)) => assert!(true),
|
||||
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
|
||||
Err(err) => assert!(false),
|
||||
};
|
||||
|
||||
let _ = match docr.pull(&image).unwrap() {
|
||||
PullResult::Some(_) => assert!(false),
|
||||
PullResult::None => assert!(true),
|
||||
};
|
||||
|
||||
let build_result = local_docker.build(app.to_build(), false).unwrap();
|
||||
|
||||
let _ = match docr.push(&build_result.build.image, false) {
|
||||
Ok(_) => assert!(true),
|
||||
Err(_) => assert!(false),
|
||||
};
|
||||
|
||||
// TODO clean local docker cache
|
||||
|
||||
let start_pull_time = SystemTime::now();
|
||||
let _ = match docr.pull(&build_result.build.image).unwrap() {
|
||||
PullResult::Some(_) => assert!(true),
|
||||
PullResult::None => assert!(false),
|
||||
};
|
||||
|
||||
let pull_duration = SystemTime::now().duration_since(start_pull_time).unwrap();
|
||||
|
||||
let _ = match local_docker.has_cache(&build_result.build) {
|
||||
Ok(CacheResult::Hit) => assert!(true),
|
||||
Ok(CacheResult::Miss(parent_build)) => assert!(false),
|
||||
Ok(CacheResult::MissWithoutParentBuild) => assert!(false),
|
||||
Err(err) => assert!(false),
|
||||
};
|
||||
|
||||
let start_pull_time = SystemTime::now();
|
||||
let _ = match docr.pull(&image).unwrap() {
|
||||
PullResult::Some(_) => assert!(true),
|
||||
PullResult::None => assert!(false),
|
||||
};
|
||||
|
||||
let pull_duration_2 = SystemTime::now().duration_since(start_pull_time).unwrap();
|
||||
|
||||
if pull_duration_2.as_millis() > pull_duration.as_millis() {
|
||||
assert!(false);
|
||||
}
|
||||
|
||||
return test_name.to_string();
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "test-do-self-hosted")]
|
||||
#[named]
|
||||
#[test]
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user