name: Plugin Go test
on:
pull_request:
defaults:
run:
# reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
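# bash flags: -e exits on the first error, -o pipefail fails a pipeline if any stage fails, -x traces each command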
shell: bash --noprofile --norc -eo pipefail -x {0}
# cancel the in-progress workflow run when the PR is updated with new commits
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
cancel-in-progress: true
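# Two jobs run the same end-to-end plugin checks: default-namespace against the
# standard rook-ceph namespace, and custom-namespace against separate operator
# (test-operator) and cluster (test-cluster) namespaces.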
jobs:
default-namespace:
runs-on: ubuntu-20.04
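# ROOK_PLUGIN_SKIP_PROMPTS suppresses the plugin's interactive confirmation prompts so destructive commands can run unattended in CI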
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup golang
uses: ./.github/workflows/set-up-go
- name: consider debugging
uses: ./.github/workflows/tmate_debug
with:
use-tmate: ${{ secrets.USE_TMATE }}
- name: setup cluster
uses: ./.github/workflows/cluster-setup
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: build the binary and run unit tests
run: |
make build
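# kubectl discovers plugins as kubectl-* executables on PATH and maps a dash in the invoked name ("rook-ceph") to an underscore, hence the kubectl-rook_ceph install name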
sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
make test
- name: Cluster Health
run: |
set -e
kubectl rook-ceph health
- name: Ceph status
run: |
set -ex
kubectl rook-ceph ceph status
- name: Ceph daemon
run: |
set -ex
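# dump_historic_ops reads recent ops via the daemon's admin socket, checking that per-daemon commands work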
kubectl rook-ceph ceph daemon mon.a dump_historic_ops
- name: Ceph status using context
run: |
set -ex
kubectl rook-ceph --context=$(kubectl config current-context) ceph status
- name: Rados df using context
run: |
set -ex
kubectl rook-ceph --context=$(kubectl config current-context) rados df
- name: radosgw-admin create user
run: |
set -ex
kubectl rook-ceph radosgw-admin user create --display-name="johnny rotten" --uid=johnny
- name: Mon restore
run: |
set -ex
# test restoring mon quorum to mon a: mons b and c are expected to be removed, then mons d and e added back
kubectl rook-ceph mons restore-quorum a
kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
tests/github-action-helper.sh wait_for_three_mons rook-ceph
kubectl -n rook-ceph wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
kubectl -n rook-ceph wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
- name: RBD command
run: |
set -ex
kubectl rook-ceph rbd ls replicapool
- name: Flatten a PVC clone
run: |
set -ex
tests/github-action-helper.sh install_external_snapshotter
tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
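# rbd flatten copies the parent image's data into the clone, so the cloned PVC no longer depends on its parent snapshot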
kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph subvolume ls
kubectl rook-ceph subvolume ls --stale
kubectl rook-ceph subvolume delete myfs test-subvol group-a
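# a StorageClass with reclaimPolicy Retain should leave the backing subvolume behind after its PVC is deleted, producing a stale subvolume for the delete test below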
tests/github-action-helper.sh create_sc_with_retain_policy
tests/github-action-helper.sh create_stale_subvolume
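# the second column of the stale listing should carry the csi-vol subvolume name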
subVol=$(kubectl rook-ceph subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph subvolume delete myfs $subVol
- name: Get mon endpoints
run: |
set -ex
kubectl rook-ceph mons
- name: Update operator configmap
run: |
set -ex
kubectl rook-ceph operator set ROOK_LOG_LEVEL DEBUG
- name: Print cr status
run: |
set -ex
kubectl rook-ceph rook version
kubectl rook-ceph rook status
kubectl rook-ceph rook status all
kubectl rook-ceph rook status cephobjectstores
- name: Restart operator pod
run: |
set -ex
kubectl rook-ceph operator restart
# wait for the old operator pod to be deleted so the restart can be verified
POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
kubectl -n rook-ceph wait --for=delete pod/$POD --timeout=100s
tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_default
- name: Maintenance Mode
run: |
set -ex
kubectl rook-ceph maintenance start rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance rook-ceph
kubectl rook-ceph maintenance stop rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 rook-ceph
- name: Purge Osd
run: |
set -ex
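# scale the OSD deployment to zero so the daemon is stopped before purging it from the cluster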
kubectl -n rook-ceph scale deployment rook-ceph-osd-0 --replicas 0
kubectl rook-ceph rook purge-osd 0 --force
- name: Restore CRD without CRName
run: |
# first, delete the CephCluster CR without waiting for the deletion to complete
kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
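# the CR's finalizer should hold it in a deleting state, which restore-deleted can then recover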
kubectl rook-ceph -n rook-ceph restore-deleted cephclusters
tests/github-action-helper.sh wait_for_crd_to_be_ready_default
- name: Restore CRD with CRName
run: |
# first, delete the CephCluster CR without waiting for the deletion to complete
kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph -n rook-ceph restore-deleted cephclusters my-cluster
tests/github-action-helper.sh wait_for_crd_to_be_ready_default
- name: Show Cluster State
run: |
set -ex
kubectl -n rook-ceph get all
- name: Destroy Cluster (removing CRs)
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
run: |
set -ex
kubectl rook-ceph destroy-cluster
sleep 1
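# with the cluster CRs gone, at most one deployment (the operator) should remain in the namespace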
kubectl get deployments -n rook-ceph --no-headers | wc -l | (read n && [ $n -le 1 ] || { echo "the CRs could not be deleted"; kubectl get all -n rook-ceph; exit 1; })
- name: collect common logs
if: always()
uses: ./.github/workflows/collect-logs
with:
name: go-test
- name: consider debugging
if: failure()
uses: mxschmitt/action-tmate@v3
with:
limit-access-to-actor: false
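# The same test flow as above, re-run with the operator in test-operator and the
# cluster in test-cluster to exercise the --operator-namespace and -n flags.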
custom-namespace:
runs-on: ubuntu-20.04
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
steps:
- name: checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: setup golang
uses: ./.github/workflows/set-up-go
- name: consider debugging
uses: ./.github/workflows/tmate_debug
with:
use-tmate: ${{ secrets.USE_TMATE }}
- name: setup cluster
uses: ./.github/workflows/cluster-setup
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
op-ns: "test-operator"
cluster-ns: "test-cluster"
- name: build the binary and run unit tests
run: |
make build
sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
make test
- name: Cluster Health
run: |
set -e
kubectl rook-ceph --operator-namespace test-operator -n test-cluster health
- name: Ceph status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph status
- name: Ceph daemon
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph daemon osd.0 dump_historic_ops
- name: Rados df
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rados df
- name: radosgw-admin create user
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster radosgw-admin user create --display-name="johnny rotten" --uid=johnny
- name: Ceph status using context
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster --context=$(kubectl config current-context) ceph status
- name: Mon restore
run: |
set -ex
# test restoring mon quorum to mon a: mons b and c are expected to be removed, then mons d and e added back
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons restore-quorum a
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
tests/github-action-helper.sh wait_for_three_mons test-cluster
kubectl -n test-cluster wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
kubectl -n test-cluster wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
- name: RBD command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rbd ls replicapool
- name: Flatten a PVC clone
run: |
set -ex
tests/github-action-helper.sh install_external_snapshotter
tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
kubectl rook-ceph --operator-namespace test-operator -n test-cluster flatten-rbd-pvc rbd-pvc-clone
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
tests/github-action-helper.sh create_sc_with_retain_policy_custom_ns test-operator test-cluster
tests/github-action-helper.sh create_stale_subvolume
subVol=$(kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs $subVol
- name: Get mon endpoints
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons
- name: Update operator configmap
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator set ROOK_LOG_LEVEL DEBUG
- name: Print cr status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook version
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status all
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status cephobjectstores
- name: Restart operator pod
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator restart
# wait for the old operator pod to be deleted so the restart can be verified
POD=$(kubectl -n test-operator get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
kubectl -n test-operator wait --for=delete pod/$POD --timeout=100s
tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_custom
- name: Maintenance Mode
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance start rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance test-cluster
kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance stop rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 test-cluster
- name: Purge Osd
run: |
set -ex
kubectl -n test-cluster scale deployment rook-ceph-osd-0 --replicas 0
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook purge-osd 0 --force
- name: Restore CRD without CRName
run: |
# first, delete the CephCluster CR without waiting for the deletion to complete
kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters
tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
- name: Restore CRD with CRName
run: |
# first, delete the CephCluster CR without waiting for the deletion to complete
kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters my-cluster
tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
- name: Show Cluster State
run: |
set -ex
kubectl -n test-cluster get all
- name: Destroy Cluster (removing CRs)
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster destroy-cluster
sleep 1
kubectl get deployments -n test-cluster --no-headers | wc -l | (read n && [ $n -le 1 ] || { echo "the CRs could not be deleted"; kubectl get all -n test-cluster; exit 1; })
- name: collect common logs
if: always()
uses: ./.github/workflows/collect-logs
with:
name: go-test-custom-namespace
- name: consider debugging
if: failure()
uses: mxschmitt/action-tmate@v3
with:
limit-access-to-actor: false