test: simplify go-test
Remove the description duplicated between the default configuration
and the custom namespace configuration.

Signed-off-by: Satoru Takeuchi <[email protected]>
satoru-takeuchi committed Jun 19, 2024
1 parent d06a1cf commit d8b362a
Showing 3 changed files with 211 additions and 381 deletions.
197 changes: 197 additions & 0 deletions .github/workflows/go-test-config/action.yaml
@@ -0,0 +1,197 @@
name: go-test
description: "test kubectl-rook-ceph commands"
inputs:
  op-ns:
    description: operator namespace where the rook operator will be deployed
    required: true
  cluster-ns:
    description: cluster namespace where the ceph cluster will be deployed
    required: true
  # composite actions cannot read the secrets context directly,
  # so the calling workflow must pass these values in as inputs
  github-token:
    description: GITHUB_TOKEN passed in by the calling workflow
    required: true
  use-tmate:
    description: whether to enable tmate debugging (forwarded to the tmate_debug workflow)
    required: false

runs:
  using: "composite"
  steps:
    - name: set environment variables
      shell: bash
      run: |
        OP_NS_OPT=""
        CLUSTER_NS_OPT=""
        test "${{ inputs.op-ns }}" != rook-ceph && OP_NS_OPT="--operator-namespace ${{ inputs.op-ns }}"
        test "${{ inputs.cluster-ns }}" != rook-ceph && CLUSTER_NS_OPT="--cluster-namespace ${{ inputs.cluster-ns }}"
        echo "NS_OPT=${OP_NS_OPT} ${CLUSTER_NS_OPT}" >> $GITHUB_ENV
    - name: checkout
      uses: actions/checkout@v4
      with:
        fetch-depth: 0

    - name: setup golang
      uses: ./.github/workflows/set-up-go

    - name: consider debugging
      uses: ./.github/workflows/tmate_debug
      with:
        use-tmate: ${{ inputs.use-tmate }}

    - name: setup cluster
      uses: ./.github/workflows/cluster-setup
      with:
        github-token: ${{ inputs.github-token }}
        op-ns: ${{ inputs.op-ns }}
        cluster-ns: ${{ inputs.cluster-ns }}

    - name: build the binary and run unit tests
      shell: bash
      run: |
        make build
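        # kubectl discovers plugins by binary name: installing the binary as kubectl-rook_ceph
        # makes it runnable as `kubectl rook-ceph` (a dash in the command maps to an underscore in the filename)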
        sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
        make test
    - name: Cluster Health
      shell: bash
      run: |
        set -e
        kubectl rook-ceph ${NS_OPT} health
    - name: Ceph status
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} ceph status
    - name: Ceph daemon
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} ceph daemon mon.a dump_historic_ops
    - name: Ceph status using context
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) ceph status
    - name: Rados df using context
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) rados df
    - name: radosgw-admin create user
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} radosgw-admin user create --display-name="johnny rotten" --uid=johnny
    - name: Mon restore
      shell: bash
      run: |
        set -ex
        # test the mon restore to restore to mon a, delete mons b and c, then add d and e
        kubectl rook-ceph ${NS_OPT} mons restore-quorum a
        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
        tests/github-action-helper.sh wait_for_three_mons ${{ inputs.cluster-ns }}
        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
    - name: Rbd command
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} rbd ls replicapool
    - name: Flatten a PVC clone
      shell: bash
      run: |
        set -ex
        tests/github-action-helper.sh install_external_snapshotter
        tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
        kubectl rook-ceph ${NS_OPT} flatten-rbd-pvc rbd-pvc-clone
    - name: Subvolume command
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} ceph fs subvolume create myfs test-subvol group-a
        kubectl rook-ceph ${NS_OPT} subvolume ls
        kubectl rook-ceph ${NS_OPT} subvolume ls --stale
        kubectl rook-ceph ${NS_OPT} subvolume delete test-subvol myfs group-a
        tests/github-action-helper.sh create_sc_with_retain_policy
        tests/github-action-helper.sh create_stale_subvolume
        subVol=$(kubectl rook-ceph ${NS_OPT} subvolume ls --stale | awk '{print $2}' | grep csi-vol)
        kubectl rook_ceph ${NS_OPT} subvolume delete myfs $subVol
    - name: Get mon endpoints
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} mons
    - name: Update operator configmap
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} operator set ROOK_LOG_LEVEL DEBUG
    - name: Print cr status
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} rook version
        kubectl rook-ceph ${NS_OPT} rook status
        kubectl rook-ceph ${NS_OPT} rook status all
        kubectl rook-ceph ${NS_OPT} rook status cephobjectstores
    - name: Restart operator pod
      shell: bash
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} operator restart
        # let's wait for the operator pod to be restarted
        POD=$(kubectl -n ${{ inputs.op-ns }} get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
        kubectl -n ${{ inputs.op-ns }} wait --for=delete pod/$POD --timeout=100s
        tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state ${{ inputs.op-ns }}
    - name: Maintenance Mode
      shell: bash
      run: |
        set -ex
        kubectl rook_ceph ${NS_OPT} maintenance start rook-ceph-osd-0
        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance ${{ inputs.cluster-ns }}
        kubectl rook_ceph ${NS_OPT} maintenance stop rook-ceph-osd-0
        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 ${{ inputs.cluster-ns }}
    - name: Purge Osd
      shell: bash
      run: |
        set -ex
        kubectl -n ${{ inputs.cluster-ns }} scale deployment rook-ceph-osd-0 --replicas 0
        kubectl rook-ceph ${NS_OPT} rook purge-osd 0 --force
    - name: Restore CRD without CRName
      shell: bash
      run: |
        # First let's delete the cephCluster
        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
        kubectl rook-ceph ${NS_OPT} restore-deleted cephcluster
        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
    - name: Restore CRD with CRName
      shell: bash
      run: |
        # First let's delete the cephCluster
        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
        kubectl rook-ceph ${NS_OPT} restore-deleted cephcluster my-cluster
        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
    - name: Show Cluster State
      shell: bash
      run: |
        set -ex
        kubectl -n ${{ inputs.cluster-ns }} get all
    - name: Destroy Cluster (removing CRs)
      shell: bash
      env:
        ROOK_PLUGIN_SKIP_PROMPTS: true
      run: |
        set -ex
        kubectl rook-ceph ${NS_OPT} destroy-cluster
        sleep 1
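        # verify teardown: fail the step if more than one deployment is still present after destroy-cluster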
        kubectl get deployments --no-headers | wc -l | (read n && [ $n -le 1 ] || { echo "the CRs could not be deleted"; exit 1; })
    - name: collect common logs
      if: always()
      uses: ./.github/workflows/collect-logs
      with:
        name: go-test

    - name: consider debugging
      if: failure()
      uses: mxschmitt/action-tmate@v3
      with:
        limit-access-to-actor: false
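
For reference, a minimal sketch of how a calling workflow job could invoke this composite action with custom namespaces. The job name, runner image, and the test-operator/test-cluster namespace values are illustrative assumptions, not part of this commit; the secrets are forwarded explicitly because composite actions cannot read the secrets context themselves.

jobs:
  custom-namespace:
    runs-on: ubuntu-22.04
    steps:
      # the repository must be checked out before a local action path can be used
      - name: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: go-test with custom namespaces
        uses: ./.github/workflows/go-test-config
        with:
          op-ns: test-operator
          cluster-ns: test-cluster
          github-token: ${{ secrets.GITHUB_TOKEN }}
          use-tmate: ${{ secrets.USE_TMATE }}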
