From 7e99e04ebe3443cb7d95ed4c370457e8d7be553f Mon Sep 17 00:00:00 2001
From: Satoru Takeuchi
Date: Wed, 19 Jun 2024 07:59:15 +0000
Subject: [PATCH] test: simplify go-test

Remove the test steps duplicated between the default configuration and
the custom namespace configuration by moving them into a shared
composite action.

Signed-off-by: Satoru Takeuchi
---
 .github/workflows/go-test-config/action.yaml | 214 ++++++++++++
 .github/workflows/go-test.yaml               | 340 +------------------
 tests/github-action-helper.sh                |  45 +--
 3 files changed, 228 insertions(+), 371 deletions(-)
 create mode 100644 .github/workflows/go-test-config/action.yaml

diff --git a/.github/workflows/go-test-config/action.yaml b/.github/workflows/go-test-config/action.yaml
new file mode 100644
index 00000000..c143238c
--- /dev/null
+++ b/.github/workflows/go-test-config/action.yaml
@@ -0,0 +1,214 @@
+name: go-test
+description: "test kubectl-rook-ceph commands"
+inputs:
+  op-ns:
+    description: namespace where the rook operator will be deployed
+    required: true
+  cluster-ns:
+    description: namespace where the ceph cluster will be deployed
+    required: true
+
+runs:
+  using: "composite"
+  steps:
+    - name: set environment variables
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        OP_NS_OPT=""
+        CLUSTER_NS_OPT=""
+        test "${{ inputs.op-ns }}" != rook-ceph && OP_NS_OPT="--operator-namespace ${{ inputs.op-ns }}"
+        test "${{ inputs.cluster-ns }}" != rook-ceph && CLUSTER_NS_OPT="--cluster-namespace ${{ inputs.cluster-ns }}"
+
+        echo "NS_OPT=${OP_NS_OPT} ${CLUSTER_NS_OPT}" >> $GITHUB_ENV
+
+    - name: setup golang
+      uses: ./.github/workflows/set-up-go
+
+    - name: consider debugging
+      uses: ./.github/workflows/tmate_debug
+      with:
+        use-tmate: ${{ secrets.USE_TMATE }}
+
+    - name: setup cluster
+      uses: ./.github/workflows/cluster-setup
+      with:
+        github-token: ${{ secrets.GITHUB_TOKEN }}
+        op-ns: ${{ inputs.op-ns }}
+        cluster-ns: ${{ inputs.cluster-ns }}
+
+    - name: build the binary and run unit tests
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        make build
+        sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
+        make test
+
+    - name: Cluster Health
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -e
+        kubectl rook-ceph ${NS_OPT} health
+
+    - name: Ceph status
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph status
+
+    - name: Ceph daemon
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph daemon mon.a dump_historic_ops
+
+    - name: Ceph status using context
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) ceph status
+
+    - name: Rados df using context
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} --context=$(kubectl config current-context) rados df
+
+    - name: radosgw-admin create user
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} radosgw-admin user create --display-name="johnny rotten" --uid=johnny
+
+    - name: Mon restore
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        # test the mon restore to restore to mon a, delete mons b and c, then add d and e
+        kubectl rook-ceph ${NS_OPT} mons restore-quorum a
+        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
+        kubectl -n ${{ inputs.cluster-ns }} wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
+        tests/github-action-helper.sh wait_for_three_mons ${{ inputs.cluster-ns }}
+        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
+        kubectl -n ${{ inputs.cluster-ns }} wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
+
+    - name: RBD command
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} rbd ls replicapool
+
+    - name: Flatten a PVC clone
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        tests/github-action-helper.sh install_external_snapshotter
+        tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
+
+        kubectl rook-ceph ${NS_OPT} flatten-rbd-pvc rbd-pvc-clone
+
+    - name: Subvolume command
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} ceph fs subvolume create myfs test-subvol group-a
+        kubectl rook-ceph ${NS_OPT} subvolume ls
+        kubectl rook-ceph ${NS_OPT} subvolume ls --stale
+        kubectl rook-ceph ${NS_OPT} subvolume delete myfs test-subvol group-a
+        tests/github-action-helper.sh create_sc_with_retain_policy
+        tests/github-action-helper.sh create_stale_subvolume
+        subVol=$(kubectl rook-ceph ${NS_OPT} subvolume ls --stale | awk '{print $2}' | grep csi-vol)
+        kubectl rook_ceph ${NS_OPT} subvolume delete myfs $subVol
+
+    - name: Get mon endpoints
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} mons
+
+    - name: Update operator configmap
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} operator set ROOK_LOG_LEVEL DEBUG
+
+    - name: Print cr status
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} rook version
+        kubectl rook-ceph ${NS_OPT} rook status
+        kubectl rook-ceph ${NS_OPT} rook status all
+        kubectl rook-ceph ${NS_OPT} rook status cephobjectstores
+
+    - name: Restart operator pod
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} operator restart
+        # let's wait for the operator pod to restart
+        POD=$(kubectl -n ${{ inputs.op-ns }} get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
+        kubectl -n ${{ inputs.op-ns }} wait --for=delete pod/$POD --timeout=100s
+        tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state ${{ inputs.op-ns }}
+
+    - name: Maintenance Mode
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl rook_ceph ${NS_OPT} maintenance start rook-ceph-osd-0
+        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance ${{ inputs.cluster-ns }}
+
+        kubectl rook_ceph ${NS_OPT} maintenance stop rook-ceph-osd-0
+        tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 ${{ inputs.cluster-ns }}
+
+    - name: Purge Osd
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl -n ${{ inputs.cluster-ns }} scale deployment rook-ceph-osd-0 --replicas 0
+        kubectl rook-ceph ${NS_OPT} rook purge-osd 0 --force
+
+    - name: Restore CRD without CRName
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        # First let's delete the cephCluster
+        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
+
+        kubectl rook-ceph ${NS_OPT} restore-deleted cephcluster
+        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
+
+    - name: Restore CRD with CRName
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        # First let's delete the cephCluster
+        kubectl -n ${{ inputs.cluster-ns }} delete cephcluster my-cluster --timeout 3s --wait=false
+
+        kubectl rook-ceph ${NS_OPT} restore-deleted cephcluster my-cluster
+        tests/github-action-helper.sh wait_for_crd_to_be_ready ${{ inputs.cluster-ns }}
+
+    - name: Show Cluster State
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      run: |
+        set -ex
+        kubectl -n ${{ inputs.cluster-ns }} get all
+
+    - name: Destroy Cluster (removing CRs)
+      shell: bash --noprofile --norc -eo pipefail -x {0}
+      env:
+        ROOK_PLUGIN_SKIP_PROMPTS: true
+      run: |
+        set -ex
+        kubectl rook-ceph ${NS_OPT} destroy-cluster
+        sleep 1
+        kubectl get deployments -n ${{ inputs.cluster-ns }} --no-headers | wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; exit 1; })
+
+    - name: collect common logs
+      if: always()
+      uses: ./.github/workflows/collect-logs
+      with:
+        name: go-test
+
+    - name: consider debugging
+      if: failure()
+      uses: mxschmitt/action-tmate@v3
+      with:
+        limit-access-to-actor: false
diff --git a/.github/workflows/go-test.yaml b/.github/workflows/go-test.yaml
index fcc4aad1..92764381 100644
--- a/.github/workflows/go-test.yaml
+++ b/.github/workflows/go-test.yaml
@@ -23,174 +23,10 @@ jobs:
         with:
           fetch-depth: 0
 
-      - name: setup golang
-        uses: ./.github/workflows/set-up-go
-
-      - name: consider debugging
-        uses: ./.github/workflows/tmate_debug
-        with:
-          use-tmate: ${{ secrets.USE_TMATE }}
-
-      - name: setup cluster
-        uses: ./.github/workflows/cluster-setup
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: build the binary and run unit tests
-        run: |
-          make build
-          sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
-          make test
-
-      - name: Cluster Health
-        run: |
-          set -e
-          kubectl rook-ceph health
-
-      - name: Ceph status
-        run: |
-          set -ex
-          kubectl rook-ceph ceph status
-
-      - name: Ceph daemon
-        run: |
-          set -ex
-          kubectl rook-ceph ceph daemon mon.a dump_historic_ops
-
-      - name: Ceph status using context
-        run: |
-          set -ex
-          kubectl rook-ceph --context=$(kubectl config current-context) ceph status
-
-      - name: Rados df using context
-        run: |
-          set -ex
-          kubectl rook-ceph --context=$(kubectl config current-context) rados df
-
-      - name: radosgw-admin create user
-        run: |
-          set -ex
-          kubectl rook-ceph radosgw-admin user create --display-name="johnny rotten" --uid=johnny
-
-      - name: Mon restore
-        run: |
-          set -ex
-          # test the mon restore to restore to mon a, delete mons b and c, then add d and e
-          kubectl rook-ceph mons restore-quorum a
-          kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
-          kubectl -n rook-ceph wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
-          tests/github-action-helper.sh wait_for_three_mons rook-ceph
-          kubectl -n rook-ceph wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
-          kubectl -n rook-ceph wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
-
-      - name: RBD command
-        run: |
-          set -ex
-          kubectl rook-ceph rbd ls replicapool
-
-      - name: Flatten a PVC clone
-        run: |
-          set -ex
-          tests/github-action-helper.sh install_external_snapshotter
-          tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
-
-          kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
-
-      - name: Subvolume command
-        run: |
-          set -ex
-          kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
-          kubectl rook-ceph subvolume ls
-          kubectl rook-ceph subvolume ls --stale
-          kubectl rook-ceph subvolume delete myfs test-subvol group-a
-          tests/github-action-helper.sh create_sc_with_retain_policy
-          tests/github-action-helper.sh create_stale_subvolume
-          subVol=$(kubectl rook-ceph subvolume ls --stale | awk '{print $2}' | grep csi-vol)
-          kubectl rook_ceph subvolume delete myfs $subVol
-
-      - name: Get mon endpoints
-        run: |
-          set -ex
-          kubectl rook-ceph mons
-
-      - name: Update operator configmap
-        run: |
-          set -ex
-          kubectl rook-ceph operator set ROOK_LOG_LEVEL DEBUG
-
-      - name: Print cr status
-        run: |
-          set -ex
-          kubectl rook-ceph rook version
-          kubectl rook-ceph rook status
-          kubectl rook-ceph rook status all
-          kubectl rook-ceph rook status cephobjectstores
-
-      - name: Restart operator pod
-        run: |
-          set -ex
-          kubectl rook-ceph operator restart
-          # let's wait for operator pod to be restart
-          POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
-          kubectl -n rook-ceph wait --for=delete pod/$POD --timeout=100s
-          tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_default
-
-      - name: Maintenance Mode
-        run: |
-          set -ex
-          kubectl rook_ceph maintenance start rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance rook-ceph
-
-          kubectl rook_ceph maintenance stop rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 rook-ceph
-
-      - name: Purge Osd
-        run: |
-          set -ex
-          kubectl -n rook-ceph scale deployment rook-ceph-osd-0 --replicas 0
-          kubectl rook-ceph rook purge-osd 0 --force
-
-      - name: Restore CRD without CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_default
-
-      - name: Restore CRD with CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters my-cluster
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_default
-
-      - name: Show Cluster State
-        run: |
-          set -ex
-          kubectl -n rook-ceph get all
-
-      - name: Destroy Cluster (removing CRs)
-        env:
-          ROOK_PLUGIN_SKIP_PROMPTS: true
-        run: |
-          set -ex
-          kubectl rook-ceph destroy-cluster
-          sleep 1
-          kubectl get deployments -n rook-ceph --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; kubectl get all -n rook-ceph; exit 1;})
-
-      - name: collect common logs
-        if: always()
-        uses: ./.github/workflows/collect-logs
-        with:
-          name: go-test
-
-      - name: consider debugging
-        if: failure()
-        uses: mxschmitt/action-tmate@v3
+      - uses: ./.github/workflows/go-test-config
         with:
-          limit-access-to-actor: false
+          op-ns: "rook-ceph"
+          cluster-ns: "rook-ceph"
 
   custom-namespace:
     runs-on: ubuntu-20.04
@@ -202,173 +38,7 @@ jobs:
         with:
           fetch-depth: 0
 
-      - name: setup golang
-        uses: ./.github/workflows/set-up-go
-
-      - name: consider debugging
-        uses: ./.github/workflows/tmate_debug
-        with:
-          use-tmate: ${{ secrets.USE_TMATE }}
-
-      - name: setup cluster
-        uses: ./.github/workflows/cluster-setup
+      - uses: ./.github/workflows/go-test-config
         with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
           op-ns: "test-operator"
-          cluster-ns: "test-cluster"
-
-      - name: build the binary and run unit tests
-        run: |
-          make build
-          sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
-          make test
-
-      - name: Cluster Health
-        run: |
-          set -e
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster health
-
-      - name: Ceph status
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph status
-
-      - name: Ceph daemon
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph daemon osd.0 dump_historic_ops
-
-      - name: Rados df
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rados df
-
-      - name: radosgw-admin create user
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster radosgw-admin user create --display-name="johnny rotten" --uid=johnny
-
-      - name: Ceph status using context
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster --context=$(kubectl config current-context) ceph status
-
-      - name: Mon restore
-        run: |
-          set -ex
-          # test the mon restore to restore to mon a, delete mons b and c, then add d and e
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons restore-quorum a
-          kubectl -n test-cluster wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
-          kubectl -n test-cluster wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
-          tests/github-action-helper.sh wait_for_three_mons test-cluster
-          kubectl -n test-cluster wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
-          kubectl -n test-cluster wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
-
-      - name: RBD command
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rbd ls replicapool
-
-      - name: Flatten a PVC clone
-        run: |
-          set -ex
-          tests/github-action-helper.sh install_external_snapshotter
-          tests/github-action-helper.sh wait_for_rbd_pvc_clone_to_be_bound
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster flatten-rbd-pvc rbd-pvc-clone
-
-      - name: Subvolume command
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
-          tests/github-action-helper.sh create_sc_with_retain_policy_custom_ns test-operator test-cluster
-          tests/github-action-helper.sh create_stale_subvolume
-          subVol=$(kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale | awk '{print $2}' | grep csi-vol)
-          kubectl rook_ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs $subVol
-
-      - name: Get mon endpoints
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons
-
-      - name: Update operator configmap
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator set ROOK_LOG_LEVEL DEBUG
-
-      - name: Print cr status
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook version
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status all
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status cephobjectstores
-
-      - name: Restart operator pod
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator restart
-          # let's wait for operator pod to be restart
-          POD=$(kubectl -n test-operator get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
-          kubectl -n test-operator wait --for=delete pod/$POD --timeout=100s
-          tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_custom
-
-      - name: Maintenance Mode
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance start rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-maintenance test-cluster
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster maintenance stop rook-ceph-osd-0
-          tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 test-cluster
-
-      - name: Purge Osd
-        run: |
-          set -ex
-          kubectl -n test-cluster scale deployment rook-ceph-osd-0 --replicas 0
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook purge-osd 0 --force
-
-      - name: Restore CRD without CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
-
-      - name: Restore CRD with CRName
-        run: |
-          # First let's delete the cephCluster
-          kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false
-
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters my-cluster
-          tests/github-action-helper.sh wait_for_crd_to_be_ready_custom
-
-      - name: Show Cluster State
-        run: |
-          set -ex
-          kubectl -n test-cluster get all
-
-      - name: Destroy Cluster (removing CRs)
-        env:
-          ROOK_PLUGIN_SKIP_PROMPTS: true
-        run: |
-          set -ex
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster destroy-cluster
-          sleep 1
-          kubectl get deployments -n test-cluster --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; kubectl get all -n test-cluster; exit 1;})
-
-      - name: collect common logs
-        if: always()
-        uses: ./.github/workflows/collect-logs
-        with:
-          name: go-test-custom-namespace
-
-      - name: consider debugging
-        if: failure()
-        uses: mxschmitt/action-tmate@v3
-        with:
-          limit-access-to-actor: false
+          cluster-ns: "test-cluster"
diff --git a/tests/github-action-helper.sh b/tests/github-action-helper.sh
index ae3226fd..b69d18ba 100755
--- a/tests/github-action-helper.sh
+++ b/tests/github-action-helper.sh
@@ -150,9 +150,10 @@ EOF
   timeout_command_exit_code
 }
 
-wait_for_pod_to_be_ready_state_default() {
+wait_for_pod_to_be_ready_state() {
+  export cluster_ns=$1
   timeout 200 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-osd -n rook-ceph -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
+    until [ $(kubectl get pod -l app=rook-ceph-osd -n "${cluster_ns}" -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
       echo "waiting for the pods to be in ready state"
       sleep 1
     done
@@ -160,29 +161,10 @@ EOF
   timeout_command_exit_code
 }
 
-wait_for_pod_to_be_ready_state_custom() {
-  timeout 200 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-osd -n test-cluster -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
-      echo "waiting for the pods to be in ready state"
-      sleep 1
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_operator_pod_to_be_ready_state_default() {
-  timeout 100 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-operator -n rook-ceph -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
-      echo "waiting for the operator to be in ready state"
-      sleep 1
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_operator_pod_to_be_ready_state_custom() {
+wait_for_operator_pod_to_be_ready_state() {
+  export operator_ns=$1
   timeout 100 bash <<-'EOF'
-    until [ $(kubectl get pod -l app=rook-ceph-operator -n test-operator -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
+    until [ $(kubectl get pod -l app=rook-ceph-operator -n "${operator_ns}" -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
       echo "waiting for the operator to be in ready state"
       sleep 1
     done
@@ -208,19 +190,10 @@ wait_for_deployment_to_be_running() {
   kubectl -n "$namespace" wait deployment "$deployment" --for condition=Available=True --timeout=90s
 }
 
-wait_for_crd_to_be_ready_default() {
-  timeout 150 bash <<-'EOF'
-    until [ $(kubectl -n rook-ceph get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
-      echo "Waiting for the CephCluster my-cluster to be in the Ready state..."
-      sleep 2
-    done
-EOF
-  timeout_command_exit_code
-}
-
-wait_for_crd_to_be_ready_custom() {
+wait_for_crd_to_be_ready() {
+  export cluster_ns=$1
   timeout 150 bash <<-'EOF'
-    until [ $(kubectl -n test-cluster get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
+    until [ $(kubectl -n "${cluster_ns}" get cephcluster my-cluster -o=jsonpath='{.status.phase}') == "Ready" ]; do
       echo "Waiting for the CephCluster my-cluster to be in the Ready state..."
       sleep 2
     done