From 36b237af2697dc38dae050cfe9da670bd0d7e65a Mon Sep 17 00:00:00 2001
From: Satoru Takeuchi
Date: Wed, 19 Jun 2024 07:59:15 +0000
Subject: [PATCH] test: simplify go-test

Remove the test steps that were duplicated between the default
configuration and the custom namespace configuration by moving them
into a shared composite action.

Signed-off-by: Satoru Takeuchi
---
 .github/workflows/cluster-setup/action.yaml   |   4 +-
 .github/workflows/go-test-config/action.yaml  | 200 ++++++++++++
 .github/workflows/go-test.yaml                | 312 +------------------
 tests/github-action-helper.sh                 |  85 ++---
 4 files changed, 242 insertions(+), 359 deletions(-)
 create mode 100644 .github/workflows/go-test-config/action.yaml

diff --git a/.github/workflows/cluster-setup/action.yaml b/.github/workflows/cluster-setup/action.yaml
index 4daf3f82..5f231d79 100644
--- a/.github/workflows/cluster-setup/action.yaml
+++ b/.github/workflows/cluster-setup/action.yaml
@@ -31,10 +31,10 @@ runs:
     - name: deploy rook cluster
       shell: bash --noprofile --norc -eo pipefail -x {0}
-      if: inputs.op-ns == '' || inputs.cluster-ns == ''
+      if: inputs.op-ns == 'rook-ceph' || inputs.cluster-ns == 'rook-ceph'
       run: tests/github-action-helper.sh deploy_rook

     - name: deploy rook cluster in custom namespace
       shell: bash --noprofile --norc -eo pipefail -x {0}
-      if: inputs.op-ns != '' || inputs.cluster-ns != ''
+      if: inputs.op-ns != 'rook-ceph' || inputs.cluster-ns != 'rook-ceph'
       run: tests/github-action-helper.sh deploy_rook_in_custom_namespace ${{ inputs.op-ns }} ${{ inputs.cluster-ns }}
diff --git a/.github/workflows/go-test-config/action.yaml b/.github/workflows/go-test-config/action.yaml
new file mode 100644
index 00000000..a3513de9
--- /dev/null
+++ b/.github/workflows/go-test-config/action.yaml
@@ -0,0 +1,200 @@
+name: go-test
+description: "test kubectl-rook-ceph commands"
+inputs:
+  op-ns:
+    description: operator namespace where the rook operator will be deployed
+    required: true
+  cluster-ns:
+    description: cluster namespace where the ceph cluster will be deployed
+    required: true
+  github-token:
+    description: GITHUB_TOKEN from the calling workflow
+    required: true
+
+runs:
+  using: "composite"
+  steps:
+    - name: set environment variables
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        OP_NS_OPT=""
+        CLUSTER_NS_OPT=""
+        test ${{ inputs.op-ns }} != rook-ceph && OP_NS_OPT="--operator-namespace ${{ inputs.op-ns }}"
+        test ${{ inputs.cluster-ns }} != rook-ceph && CLUSTER_NS_OPT="-n ${{ inputs.cluster-ns }}"
+
+        echo "NS_OPT=${OP_NS_OPT} ${CLUSTER_NS_OPT}" >> $GITHUB_ENV
+
+    - name: setup golang
+      uses: ./.github/workflows/set-up-go
+
+    - name: setup cluster
+      uses: ./.github/workflows/cluster-setup
+      with:
+        github-token: ${{ inputs.github-token }}
+        op-ns: ${{ inputs.op-ns }}
+        cluster-ns: ${{ inputs.cluster-ns }}
+
+    - name: build the binary and run unit tests
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        make build
+        sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
+        make test
+
+    - name: Cluster Health
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -e
+        kubectl rook-ceph ${NS_OPT} health
+
+    - name: Ceph status
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph status
+
+    - name: Ceph daemon
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph daemon mon.a dump_historic_ops
+
+    - name: Ceph status using context
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) ceph status
+
+    - name: Rados df using context
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) rados df
+
+    - name: radosgw-admin create user
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} radosgw-admin user create --display-name="johnny rotten" --uid=johnny
+
+    - name: Mon restore
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        # test the mon restore to restore to mon a, delete mons b and c, then add d and e
+        kubectl rook-ceph ${NS_OPT} mons restore-quorum a
+        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
+        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
+        tests/github-action-helper.sh wait_for_three_mons ${{ inputs.cluster-ns }}
+        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
+        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
+
+    - name: RBD command
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} rbd ls replicapool
+
+    - name: Flatten a PVC clone
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        tests/github-action-helper.sh install_external_snapshotter
+        tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
+
+        kubectl rook-ceph ${NS_OPT} flatten-rbd-pvc rbd-pvc-clone
+
+    - name: Subvolume command
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph fs subvolume create myfs test-subvol group-a
+        kubectl rook-ceph ${NS_OPT} subvolume ls
+        kubectl rook-ceph ${NS_OPT} subvolume ls --stale
+        kubectl rook-ceph ${NS_OPT} subvolume delete myfs test-subvol group-a
+        tests/github-action-helper.sh create_sc_with_retain_policy ${{ inputs.op-ns }} ${{ inputs.cluster-ns }}
+        tests/github-action-helper.sh create_stale_subvolume
+        subVol=$(kubectl rook-ceph ${NS_OPT} subvolume ls --stale | awk '{print $2}' | grep csi-vol)
+        kubectl rook_ceph ${NS_OPT} subvolume delete myfs $subVol
+
+    - name: Get mon endpoints
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} mons
+
+    - name: Update operator configmap
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} operator set ROOK_LOG_LEVEL DEBUG
+
+    - name: Print cr status
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} rook version
+        kubectl rook-ceph ${NS_OPT} rook status
+        kubectl rook-ceph ${NS_OPT} rook status all
+        kubectl rook-ceph ${NS_OPT} rook status cephobjectstores
+
+    - name: Restart operator pod
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} operator restart
+        # let's wait for the operator pod to be restarted
+        POD=$(kubectl -n ${{ inputs.op-ns }} get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
+        kubectl -n ${{ inputs.op-ns }} wait --for=delete pod/$POD --timeout=100s
+        tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state ${{ inputs.op-ns }}
+
+    - name: Maintenance Mode
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook_ceph ${NS_OPT} maintenance start rook-ceph-osd-0
+        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance ${{ inputs.cluster-ns }}
+
+        kubectl rook_ceph ${NS_OPT} maintenance stop rook-ceph-osd-0
+        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 ${{ inputs.cluster-ns }}
+
+    - name: Purge Osd
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl -n ${{ inputs.cluster-ns }} scale deployment rook-ceph-osd-0 --replicas 0
+        kubectl rook-ceph ${NS_OPT} rook purge-osd 0 --force
+
+    - name: Restore CRD without CRName
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        # First let's delete the cephCluster
+        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
+
+        kubectl rook-ceph ${NS_OPT} restore-deleted cephclusters
+        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
+
+    - name: Restore CRD with CRName
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        # First let's delete the cephCluster
+        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
+
+        kubectl rook-ceph ${NS_OPT} restore-deleted cephclusters my-cluster
+        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
+
+    - name: Show Cluster State
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl -n ${{ inputs.cluster-ns }} get all
+
+    - name: Destroy Cluster (removing CRs)
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      env:
+        ROOK_PLUGIN_SKIP_PROMPTS: true
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} destroy-cluster
+        sleep 1
+        kubectl get deployments --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; exit 1;})
diff --git a/.github/workflows/go-test.yaml b/.github/workflows/go-test.yaml
index fcc4aad1..9f1f557b 100644
--- a/.github/workflows/go-test.yaml
+++ b/.github/workflows/go-test.yaml
@@ -23,163 +23,18 @@ jobs:
         with:
           fetch-depth: 0

-      - name: setup golang
-        uses: ./.github/workflows/set-up-go
-
       - name: consider debugging
         uses: ./.github/workflows/tmate_debug
         with:
           use-tmate: ${{ secrets.USE_TMATE }}

-      - name: setup cluster
-        uses: ./.github/workflows/cluster-setup
+      - name: plugin test
+        uses: ./.github/workflows/go-test-config
         with:
+          op-ns: rook-ceph
+          cluster-ns: rook-ceph
           github-token: ${{ secrets.GITHUB_TOKEN }}

-      - name: build the binary and run unit tests
-        run: |
-          make build
-          sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
-          make test
-
-      - name: Cluster Health
-        run: |
-          set -e
-          kubectl rook-ceph health
-
-      - name: Ceph status
-        run: |
-          set -ex
-          kubectl rook-ceph ceph status
-
-      - name: Ceph daemon
-        run: |
-          set -ex
-          kubectl rook-ceph ceph daemon mon.a dump_historic_ops
-
-      - name: Ceph status using context
-        run: |
-          set -ex
-          kubectl rook-ceph --context=$(kubectl config current-context) ceph status
-
-      - name: Rados df using context
-        run: |
-          set -ex
-          kubectl rook-ceph --context=$(kubectl config current-context) rados df
-
-      - name: radosgw-admin create user
-        run: |
-          set -ex
-          kubectl rook-ceph radosgw-admin user create --display-name="johnny rotten" --uid=johnny
-
-      - name: Mon restore
-        run: |
-          set -ex
-          # test the mon restore to restore to mon a, delete mons b and c, then add d and e
-          kubectl rook-ceph mons restore-quorum a
-          kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
-          kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
-          tests/github-action-helper.sh wait_for_three_mons rook-ceph
-          kubectl -n rook-ceph wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
-          kubectl -n rook-ceph wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
-
-      - name: RBD command
-        run: |
-          set -ex
-          kubectl rook-ceph rbd ls replicapool
-
-      - name: Flatten a PVC clone
-        run: |
-          set -ex
-          tests/github-action-helper.sh install_external_snapshotter
-          tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
-
-          kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
-
-      - name: Subvolume command
-        run: |
-          set -ex
-          kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
-          kubectl rook-ceph subvolume ls
-          kubectl rook-ceph subvolume ls --stale
-          kubectl rook-ceph subvolume delete myfs test-subvol group-a
-          tests/github-action-helper.sh create_sc_with_retain_policy
-          tests/github-action-helper.sh create_stale_subvolume
-          subVol=$(kubectl rook-ceph subvolume ls --stale | awk '{print $2}' | grep csi-vol)
-          kubectl rook_ceph subvolume delete myfs $subVol
-
-      - name: Get mon endpoints
-        run: |
-          set -ex
-          kubectl rook-ceph mons
-
-      - name: Update operator configmap
-        run: |
-          set -ex
-          kubectl rook-ceph operator set ROOK_LOG_LEVEL DEBUG
-
-      - name: Print cr status
-        run: |
-          set -ex
-          kubectl rook-ceph rook version
-          kubectl rook-ceph rook status
-          kubectl rook-ceph rook status all
-          kubectl rook-ceph rook status cephobjectstores
-
-      - name: Restart operator pod
-        run: |
-          set -ex
-          kubectl rook-ceph operator restart
-          # let's wait for operator pod to be restart
-          POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
-          kubectl -n rook-ceph wait --for=delete pod/$POD --timeout=100s
-          tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_default
-
-      - name: Maintenance Mode
-        run: |
-          set -ex
-          kubectl rook_ceph maintenance start rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance rook-ceph
-
-          kubectl rook_ceph maintenance stop rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 rook-ceph
-
-      - name: Purge Osd
-        run: |
-          set -ex
-          kubectl -n rook-ceph scale deployment rook-ceph-osd-0 --replicas 0
-          kubectl rook-ceph rook purge-osd 0 --force
-
-      - name: Restore CRD without CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_default
-
-      - name: Restore CRD with CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters my-cluster
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_default
-
-      - name: Show Cluster State
-        run: |
-          set -ex
-          kubectl -n rook-ceph get all
-
-      - name: Destroy Cluster (removing CRs)
-        env:
-          ROOK_PLUGIN_SKIP_PROMPTS: true
-        run: |
-          set -ex
-          kubectl rook-ceph destroy-cluster
-          sleep 1
-          kubectl get deployments -n rook-ceph --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; kubectl get all -n rook-ceph; exit 1;})
-
       - name: collect common logs
         if: always()
         uses: ./.github/workflows/collect-logs
         with:
@@ -192,6 +47,7 @@
       with:
         limit-access-to-actor: false

+  custom-namespace:
     runs-on: ubuntu-20.04
     env:
@@ -202,164 +58,18 @@
       with:
         fetch-depth: 0

-      - name: setup golang
-        uses: ./.github/workflows/set-up-go
-
       - name: consider debugging
-        uses: ./.github/workflows/tmate_debug
+        if: failure()
+        uses: mxschmitt/action-tmate@v3
         with:
           use-tmate: ${{ secrets.USE_TMATE }}

-      - name: setup cluster
-        uses: ./.github/workflows/cluster-setup
+      - name: plugin test
+        uses: ./.github/workflows/go-test-config
         with:
+          op-ns: test-operator
+          cluster-ns: test-cluster
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          op-ns: "test-operator"
-          cluster-ns: "test-cluster"
-
-      - name: build the binary and run unit tests
-        run: |
-          make build
-          sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
-          make test
-
-      - name: Cluster Health
-        run: |
-          set -e
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster health
-
-      - name: Ceph status
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph status
-
-      - name: Ceph daemon
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph daemon osd.0 dump_historic_ops
-
-      - name: Rados df
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rados df
-
-      - name: radosgw-admin create user
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster radosgw-admin user create --display-name="johnny rotten" --uid=johnny
-
-      - name: Ceph status using context
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster --context=$(kubectl config current-context) ceph status
-
-      - name: Mon restore
-        run: |
-          set -ex
-          # test the mon restore to restore to mon a, delete mons b and c, then add d and e
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons restore-quorum a
-          kubectl -n test-cluster wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
-          kubectl -n test-cluster wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
-          tests/github-action-helper.sh wait_for_three_mons test-cluster
-          kubectl -n test-cluster wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
-          kubectl -n test-cluster wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
-
-      - name: RBD command
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rbd ls replicapool
-
-      - name: Flatten a PVC clone
-        run: |
-          set -ex
-          tests/github-action-helper.sh install_external_snapshotter
-          tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster flatten-rbd-pvc rbd-pvc-clone
-
-      - name: Subvolume command
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
-          tests/github-action-helper.sh create_sc_with_retain_policy_custom_ns test-operator test-cluster
-          tests/github-action-helper.sh create_stale_subvolume
-          subVol=$(kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale | awk '{print $2}' | grep csi-vol)
-          kubectl rook_ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs $subVol
-
-      - name: Get mon endpoints
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons
-
-      - name: Update operator configmap
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator set ROOK_LOG_LEVEL DEBUG
-
-      - name: Print cr status
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook version
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status all
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status cephobjectstores
-
-      - name: Restart operator pod
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator restart
-          # let's wait for operator pod to be restart
-          POD=$(kubectl -n test-operator get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
-          kubectl -n test-operator wait --for=delete pod/$POD --timeout=100s
-          tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_custom
-
-      - name: Maintenance Mode
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance start rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance test-cluster
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance stop rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 test-cluster
-
-      - name: Purge Osd
-        run: |
-          set -ex
-          kubectl -n test-cluster scale deployment rook-ceph-osd-0 --replicas 0
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook purge-osd 0 --force
-
-      - name: Restore CRD without CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
-
-      - name: Restore CRD with CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters my-cluster
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
-
-      - name: Show Cluster State
-        run: |
-          set -ex
-          kubectl -n test-cluster get all
-
-      - name: Destroy Cluster (removing CRs)
-        env:
-          ROOK_PLUGIN_SKIP_PROMPTS: true
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster destroy-cluster
-          sleep 1
-          kubectl get deployments -n test-cluster --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; kubectl get all -n test-cluster; exit 1;})

       - name: collect common logs
         if: always()
diff --git a/tests/github-action-helper.sh b/tests/github-action-helper.sh
index ae3226fd..a7bc0d20 100755
--- a/tests/github-action-helper.sh
+++ b/tests/github-action-helper.sh
@@ -44,7 +44,7 @@ deploy_rook() {
   sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\//}|g" cluster-test.yaml
   sed -i '0,/count: 1/ s/count: 1/count: 3/' cluster-test.yaml
   kubectl create -f cluster-test.yaml
-  wait_for_pod_to_be_ready_state_default
+  wait_for_pod_to_be_ready_state rook-ceph
   kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/toolbox.yaml

   deploy_csi_driver_default_ns
@@ -58,39 +58,32 @@ deploy_rook_in_custom_namespace() {
   kubectl create namespace test-operator # creating namespace manually because rook common.yaml create one namespace and here we need 2
   curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/common.yaml -o common.yaml
-  deploy_with_custom_ns "$1" "$2" common.yaml
+  deploy_with_custom_ns "$OPERATOR_NS" "$CLUSTER_NS" common.yaml
   kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/crds.yaml
   curl -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/operator.yaml -o operator.yaml
-  deploy_with_custom_ns "$1" "$2" operator.yaml
+  deploy_with_custom_ns "$OPERATOR_NS" "$CLUSTER_NS" operator.yaml

   curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/cluster-test.yaml -o cluster-test.yaml
   sed -i "s|#deviceFilter:|deviceFilter: ${BLOCK/\/dev\//}|g" cluster-test.yaml
   sed -i '0,/count: 1/ s/count: 1/count: 3/' cluster-test.yaml
-  deploy_with_custom_ns "$1" "$2" cluster-test.yaml
-  wait_for_pod_to_be_ready_state_custom
+  deploy_with_custom_ns "$OPERATOR_NS" "$CLUSTER_NS" cluster-test.yaml
+  wait_for_pod_to_be_ready_state $CLUSTER_NS

   curl -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/toolbox.yaml -o toolbox.yaml
-  deploy_with_custom_ns "$1" "$2" toolbox.yaml
+  deploy_with_custom_ns "$OPERATOR_NS" "$CLUSTER_NS" toolbox.yaml

-  deploy_csi_driver_custom_ns "$1" "$2"
+  deploy_csi_driver_custom_ns "$OPERATOR_NS" "$CLUSTER_NS"
 }

 create_sc_with_retain_policy(){
-  curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml -o storageclass.yaml
-  sed -i "s|name: rook-cephfs|name: rook-cephfs-retain|g" storageclass.yaml
-  sed -i "s|reclaimPolicy: Delete|reclaimPolicy: Retain|g" storageclass.yaml
-  kubectl create -f storageclass.yaml
-}
-
-create_sc_with_retain_policy_custom_ns(){
   export OPERATOR_NS=$1
   export CLUSTER_NS=$2

   curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml -o storageclass.yaml
   sed -i "s|name: rook-cephfs|name: rook-cephfs-retain|g" storageclass.yaml
   sed -i "s|reclaimPolicy: Delete|reclaimPolicy: Retain|g" storageclass.yaml
-  sed -i "s|provisioner: rook-ceph.cephfs.csi.ceph.com |provisioner: test-operator.cephfs.csi.ceph.com |g" storageclass.yaml
+  sed -i "s|provisioner: rook-ceph.cephfs.csi.ceph.com |provisioner: ${OPERATOR_NS}.cephfs.csi.ceph.com |g" storageclass.yaml
   deploy_with_custom_ns $OPERATOR_NS $CLUSTER_NS storageclass.yaml
 }
@@ -100,7 +93,7 @@ create_stale_subvolume() {
   sed -i "s|storageClassName: rook-cephfs|storageClassName: rook-cephfs-retain|g" pvc.yaml
   kubectl create -f pvc.yaml
   kubectl get pvc cephfs-pvc-retain
-  wait_for_pvc_to_be_bound_state_default
+  wait_for_pvc_to_be_bound_state
   : "${PVNAME:=$(kubectl get pvc cephfs-pvc-retain -o=jsonpath='{.spec.volumeName}')}"
   kubectl get pvc cephfs-pvc-retain
   kubectl delete pvc cephfs-pvc-retain
@@ -108,9 +101,12 @@
 }

 deploy_with_custom_ns() {
-  sed -i "s|rook-ceph # namespace:operator|$1 # namespace:operator|g" "$3"
-  sed -i "s|rook-ceph # namespace:cluster|$2 # namespace:cluster|g" "$3"
-  kubectl create -f "$3"
+  export OPERATOR_NS=$1
+  export CLUSTER_NS=$2
+  export MANIFEST=$3
+  sed -i "s|rook-ceph # namespace:operator|${OPERATOR_NS} # namespace:operator|g" "${MANIFEST}"
+  sed -i "s|rook-ceph # namespace:cluster|${CLUSTER_NS} # namespace:cluster|g" "${MANIFEST}"
+  kubectl create -f "${MANIFEST}"
 }

 deploy_csi_driver_default_ns() {
@@ -140,8 +136,9 @@ deploy_csi_driver_custom_ns() {
   kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/pvc.yaml
 }

-wait_for_pvc_to_be_bound_state_default() {
+wait_for_pvc_to_be_bound_state() {
   timeout 100 bash <<-'EOF'
+    set -x
     until [ $(kubectl get pvc cephfs-pvc-retain -o jsonpath='{.status.phase}') == "Bound" ]; do
       echo "waiting for the pvc to be in bound state"
       sleep 1
@@ -150,19 +147,11 @@ EOF
   timeout_command_exit_code
 }

-wait_for_pod_to_be_ready_state_default() {
-  timeout 200 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-osd -n rook-ceph -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
-      echo "waiting for the pods to be in ready state"
-      sleep 1
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_pod_to_be_ready_state_custom() {
+wait_for_pod_to_be_ready_state() {
+  export cluster_ns=$1
   timeout 200 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-osd -n test-cluster -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
+    set -x
+    until [ $(kubectl get pod -l app=rook-ceph-osd -n "${cluster_ns}" -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
       echo "waiting for the pods to be in ready state"
       sleep 1
     done
@@ -170,19 +159,11 @@ EOF
   timeout_command_exit_code
 }

-wait_for_operator_pod_to_be_ready_state_default() {
+wait_for_operator_pod_to_be_ready_state() {
+  export operator_ns=$1
   timeout 100 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-operator -n rook-ceph -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
-      echo "waiting for the operator to be in ready state"
-      sleep 1
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_operator_pod_to_be_ready_state_custom() {
-  timeout 100 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-operator -n test-operator -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
+    set -x
+    until [ $(kubectl get pod -l app=rook-ceph-operator -n "${operator_ns}" -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
       echo "waiting for the operator to be in ready state"
       sleep 1
     done
@@ -193,6 +174,7 @@ EOF
 wait_for_three_mons() {
   export namespace=$1
   timeout 150 bash <<-'EOF'
+    set -x
     until [ $(kubectl -n $namespace get deploy -l app=rook-ceph-mon,mon_canary!=true | grep rook-ceph-mon | wc -l | awk '{print $1}' ) -eq 3 ]; do
       echo "$(date) waiting for three mon deployments to exist"
       sleep 2
     done
@@ -208,19 +190,10 @@ wait_for_deployment_to_be_running() {
   kubectl -n "$namespace" wait deployment "$deployment" --for condition=Available=True --timeout=90s
 }

-wait_for_crd_to_be_ready_default() {
-  timeout 150 bash <<-'EOF'
-    until [ $(kubectl -n rook-ceph get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
-      echo "Waiting for the CephCluster my-cluster to be in the Ready state..."
-      sleep 2
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_crd_to_be_ready_custom() {
+wait_for_crd_to_be_ready() {
+  export cluster_ns=$1
   timeout 150 bash <<-'EOF'
-    until [ $(kubectl -n test-cluster get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
+    until [ $(kubectl -n "${cluster_ns}" get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
       echo "Waiting for the CephCluster my-cluster to be in the Ready state..."
       sleep 2
     done