# .gitlab-ci.yml
---
image: docker:25.0.3-alpine3.19

variables:
  # ! GitLab says you can use variables in image properties, but they don't work.
  # Try to keep these current to receive performance and security improvements.
  # CLOUD_SDK_IMAGE: google/cloud-sdk:464.0.0
  # DOCKER_IMAGE: docker:25.0.3-alpine3.19
  # KUBE_INGRESS_BASE_DOMAIN is the application deployment domain and should be
  # set as a variable at the group or project level.
  KUBE_INGRESS_BASE_DOMAIN: stg.cca.edu
  # ! not clear to me what these are doing -EP
  POSTGRES_ENABLED: "true"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG
  POSTGRES_VERSION: 9.6.2
  KUBERNETES_VERSION: 1.16.15
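  # (The POSTGRES_*/KUBERNETES_VERSION values above look like leftovers from
  # GitLab's Auto DevOps template; nothing in the jobs below appears to
  # reference them, so they are likely safe to remove.)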
  # Docker-in-Docker (DIND)
  # When using dind service, you must instruct Docker to talk with
  # the daemon started inside of the service. The daemon is available
  # with a network connection instead of the default
  # /var/run/docker.sock socket.
  #
  # The 'docker' hostname is the alias of the service container as described at
  # https://docs.gitlab.com/ee/ci/services/#accessing-the-services.
  DOCKER_HOST: tcp://docker:2376
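  # Port 2376 is Docker's conventional TLS port (2375 is the unencrypted one),
  # matching the TLS settings below.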
  # Specify to Docker where to create the certificates. Docker creates them
  # automatically on boot, and creates `/certs/client` to share between the
  # service and job container, thanks to the volume mount from config.toml.
  DOCKER_TLS_CERTDIR: "/certs"
  # These are usually specified by the entrypoint, however the
  # Kubernetes executor doesn't run entrypoints:
  # https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4125
  DOCKER_TLS_VERIFY: 1
  DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client"
  ROLLOUT_RESOURCE_TYPE: deployment

services:
  - docker:dind

stages:
  - build
  - synchronize
  - deploy
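# Pipeline overview (derived from the job rules below): refs matching
# (mg|ep)-(fast|nodb|full)-* build a staging image, *-full-* refs run the full
# staging deploy, *-fast-* refs run the quicker deploy that skips secret
# provisioning, and release-* refs build a production image plus a manual
# production deploy.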
# Build & push the Docker image to the Google container registry for staging.
docker_image_staging:
  image: docker:25.0.3-alpine3.19
  stage: build
  rules:
    - if: "$CI_COMMIT_REF_NAME =~ /^(mg|ep)-(fast|nodb|full)-*/"
      when: always
  variables:
    DOCKER_IMAGE: us.gcr.io/cca-web-staging/libraries
    DOCKER_IMAGE_TAG: "${DOCKER_IMAGE}:${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
  services:
    - docker:dind
  script:
    # login first, otherwise `docker pull` throws errors
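    # "_json_key" is the literal username Google's registries expect when the
    # password is a service-account JSON key (piped in via --password-stdin).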
    - echo "${SA_SERVICE_ACCOUNT_KEY}" > key.json
    - docker login -u _json_key --password-stdin https://us.gcr.io < key.json
    # we need a consistent tag like "latest" or this docker pull fails & we lose our build cache
    - docker pull --quiet "${DOCKER_IMAGE}:latest" || true
    # must build with --build-arg BUILDKIT_INLINE_CACHE=1 or the image doesn't work with --cache-from
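    # (BUILDKIT_INLINE_CACHE=1 makes BuildKit embed cache metadata in the pushed
    # image, so a later pull of :latest can seed --cache-from.)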
    - docker build --build-arg BUILDKIT_INLINE_CACHE=1 --cache-from "${DOCKER_IMAGE}:latest" -t "${DOCKER_IMAGE_TAG}" .
    - docker tag "${DOCKER_IMAGE_TAG}" "${DOCKER_IMAGE}:latest"
    - docker push --all-tags "${DOCKER_IMAGE}"
# Build & push the Docker image to Google Artifact Registry for production.
docker_image_prod:
  image: docker:25.0.3-alpine3.19
  stage: build
  rules:
    - if: "$CI_COMMIT_REF_NAME =~ /^release-*/"
      when: always
  variables:
    DOCKER_IMAGE: "us-west2-docker.pkg.dev/cca-web-0/docker-web/libraries"
    DOCKER_IMAGE_TAG: "${DOCKER_IMAGE}:${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
  services:
    - docker:dind
  script:
    - echo "${SA_SERVICE_ACCOUNT_PROD_KEY}" > key.json
    - docker login -u _json_key --password-stdin https://us-west2-docker.pkg.dev < key.json
    - docker pull --quiet "${DOCKER_IMAGE}:latest" || true
    # must build with --build-arg BUILDKIT_INLINE_CACHE=1 or the image doesn't work with --cache-from
    - docker build --build-arg BUILDKIT_INLINE_CACHE=1 --cache-from "${DOCKER_IMAGE}:latest" -t "${DOCKER_IMAGE_TAG}" .
    - docker tag "${DOCKER_IMAGE_TAG}" "${DOCKER_IMAGE}:latest"
    - docker push --all-tags "${DOCKER_IMAGE}"
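# Note: staging pushes to a Container Registry-style host (us.gcr.io) while
# production pushes to an Artifact Registry host (us-west2-docker.pkg.dev);
# the login URL and service-account key differ accordingly.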
# ---------------------
# Google Authentication
# ---------------------
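# The `.name: &name` entries below are YAML anchors; jobs reuse them later with
# `*name` aliases so each deploy job shares the same auth/provisioning steps.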
.gcloud_auth_prod: &gcloud_auth_prod
  - echo "${SA_SERVICE_ACCOUNT_PROD_KEY}" > key.json
  - gcloud auth activate-service-account --key-file=key.json
  - gcloud config set project cca-web-0
  - gcloud config set container/cluster ccaedu-prod
  - gcloud config set compute/zone us-west1-b
  - gcloud container clusters get-credentials ccaedu-prod --zone us-west1-b

.gcloud_auth_staging: &gcloud_auth_staging
  - echo "${SA_SERVICE_ACCOUNT_KEY}" > key.json
  - gcloud auth activate-service-account --key-file=key.json
  - gcloud config set project cca-web-staging
  - gcloud config set container/cluster ccaedu-stg
  - gcloud config set compute/zone us-west1-b
  - gcloud container clusters get-credentials ccaedu-stg --zone us-west1-b
# ---------------------------------------
# STAGING PROJECT SET-UP - ANCHOR DEFS
# ---------------------------------------
# ! This step is commented out in synchronize_stage below.
# For this step to work, the GitLab CI service account must have read permission
# on the production export GCS bucket. In addition, the Cloud SQL instance's
# service account _must_ have the correct permissions on that bucket:
# https://cloud.google.com/sql/docs/postgres/import-export/importing
.provision_db_staging: &provision_db_staging
  - export DB_FILE=libraries_export_"$(date +'%Y_%m_%d')".gz
  - gsutil cp gs://cca-manual-db-dumps/${DB_FILE} gs://libraries-db-dumps-ci
  - gcloud sql databases delete libraries-${KUBERNETES_NAMESPACE_OVERWRITE} --instance=psql14-instance || true
  - gcloud sql databases create libraries-${KUBERNETES_NAMESPACE_OVERWRITE} --instance=psql14-instance --charset=UTF8 --collation=en_US.UTF8 || true
  - gcloud sql import sql psql14-instance gs://libraries-db-dumps-ci/${DB_FILE} --database=libraries-${KUBERNETES_NAMESPACE_OVERWRITE} -q || true
  # - gsutil rm gs://cca-manual-db-dumps/${DB_FILE}
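# The flow above: copy the latest prod export into the CI dumps bucket, drop and
# recreate the per-namespace database, then import the dump. The `|| true`
# suffixes keep the job green when the database doesn't exist yet or the import
# reports non-fatal warnings.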
# Frequently not needed & not done during ep-fast-0.x deploys.
.provision_secrets_staging: &provision_secrets_staging
  - kubectl get secret cloud-storage-credentials cloudsql-db-credentials summon-sftp-secrets --namespace=libraries -oyaml | sed -e '/namespace:/d;/creationTimestamp:/d;/resourceVersion:/d;/uid:/d' | kubectl apply --namespace="${KUBERNETES_NAMESPACE_OVERWRITE}" -f - || true
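# This copies the three secrets from the `libraries` namespace into the review
# namespace; the sed strips server-managed fields (namespace, creationTimestamp,
# resourceVersion, uid) so `kubectl apply` can recreate them cleanly.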
# ---------------------
# DEPLOYMENTS - STAGING
# ---------------------
.apply_k8s_stage: &apply_k8s_stage
  - cat kubernetes/staging.yaml | envsubst | kubectl apply -f -
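# envsubst (from the gettext package installed in the deploy jobs) replaces
# ${VAR} references in the manifest with the corresponding CI environment
# variables before the result is piped to `kubectl apply`.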
.provision_storage_stage: &provision_storage_stage
  - gsutil ls -b gs://libraries-media-staging-"${KUBERNETES_NAMESPACE_OVERWRITE}"/ || gsutil mb -p cca-web-staging -c STANDARD -l us-west1 -b on gs://libraries-media-staging-"${KUBERNETES_NAMESPACE_OVERWRITE}"/
  # removed -d flag from rsync so files are not deleted from lib-ep bucket
  - gsutil -q -m rsync -r gs://libraries-media-staging/ gs://libraries-media-staging-"${KUBERNETES_NAMESPACE_OVERWRITE}"/
  - gsutil iam ch allUsers:objectViewer gs://libraries-media-staging-"${KUBERNETES_NAMESPACE_OVERWRITE}"/
# this synchronizes gs://libraries-media-staging/ with gs://libraries-media-staging-lib-ep
# ! not necessary, skipped for now, edit "rules" to reactivate
# tag example: "ep-full-0.23" or "mg-full-0.0.1".
synchronize_stage:
  image: google/cloud-sdk:464.0.0
  stage: synchronize
  rules:
    - when: never
    # - if: "$CI_COMMIT_REF_NAME =~ /^(mg|jj|althea|ep)-full-*/"
    #   when: always
  script:
    # - *export_prod_db
    - *gcloud_auth_staging
    # - *provision_db_staging
    - *provision_storage_stage
  environment:
    name: "$CI_COMMIT_REF_NAME"
# tag example: "ep-full-0.23" or "mg-full-0.0.1"
deploy_full_stage:
  image: google/cloud-sdk:464.0.0
  stage: deploy
  rules:
    - if: "$CI_COMMIT_REF_NAME =~ /^(mg|ep)-full-*/"
      when: always
  script:
    - apt-get update
    - apt -y install gettext
    - *gcloud_auth_staging
    - *apply_k8s_stage
    - *provision_secrets_staging
  environment:
    name: "$CI_COMMIT_REF_NAME"
# Same as deploy_full_stage except that it skips provisioning secrets.
# tag example: mg-fast-0.0.1
deploy_fast_stage:
  image: google/cloud-sdk:464.0.0
  stage: deploy
  rules:
    - if: "$CI_COMMIT_REF_NAME =~ /^(mg|ep)-fast-*/"
      when: always
  script:
    - apt-get update
    - apt -y install gettext
    - *gcloud_auth_staging
    - *apply_k8s_stage
  environment:
    name: "$CI_COMMIT_REF_NAME"
    url: https://$CI_ENVIRONMENT_SLUG.stg.cca.edu
# ------------------------
# DEPLOYMENTS - PRODUCTION
# ------------------------
# ONLY HERE FOR REFERENCE - THIS SHOULD ONLY BE DONE ONCE
# stage: deploy
# rules:
# - if: "$CI_COMMIT_REF_NAME =~ /^staging-fast-*/"
# when: always
# script:
# - apt-get update
# - apt -y install gettext
# - *gcloud_auth_staging
# - *apply_k8s_staging
# environment:
# name: "$CI_COMMIT_REF_NAME"
# url: https://$CI_ENVIRONMENT_SLUG.stg.cca.edu
#.provision_storage_prod: &provision_storage_prod
# - gsutil ls -b gs://libraries-media.cca.edu/ || gsutil mb -p cca-web-0 -c STANDARD -l us-west1 -b on gs://libraries-media.cca.edu/
# - gsutil iam ch allUsers:objectViewer gs://libraries-media.cca.edu/
# - export CORS_ORIGIN='libraries'; envsubst < kubernetes/cors.json > /tmp/cors.json; gsutil cors set /tmp/cors.json gs://libraries-media.cca.edu; rm /tmp/cors.json
.apply_k8s_prod: &apply_k8s_prod
  - cat kubernetes/production.yaml | envsubst | kubectl apply -f -
# tag example: release-${VERSION}
deploy_release:
  image: google/cloud-sdk:464.0.0
  stage: deploy
  rules:
    - if: "$CI_COMMIT_REF_NAME =~ /^release-*/"
      when: manual
  script:
    - apt-get update
    - apt -y install gettext
    - *gcloud_auth_prod
    - *apply_k8s_prod
  environment:
    name: "$CI_COMMIT_REF_NAME"
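# Because this rule uses `when: manual`, the production deploy only runs when
# someone triggers it from the GitLab pipeline UI (or API) after the release-*
# image has been built.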