From 0b900f51a163aa45d7eee9e7c0c1c03f2194fb70 Mon Sep 17 00:00:00 2001 From: Joanna Grycz <37943406+gryczj@users.noreply.github.com> Date: Tue, 3 Sep 2024 17:22:00 +0200 Subject: [PATCH 1/4] fix: compute_hyperdisk_create_from_pool (#3808) * fix: compute_hyperdisk_create_from_pool * Fix for createComputeHyperdiskPool.test.js --- .../disks/createComputeHyperdiskFromPool.js | 4 +- compute/disks/createComputeHyperdiskPool.js | 2 +- .../createComputeHyperdiskFromPool.test.js | 122 ++++++++---------- .../test/createComputeHyperdiskPool.test.js | 65 ---------- 4 files changed, 57 insertions(+), 136 deletions(-) delete mode 100644 compute/test/createComputeHyperdiskPool.test.js diff --git a/compute/disks/createComputeHyperdiskFromPool.js b/compute/disks/createComputeHyperdiskFromPool.js index 8f1c224964..32c09214f4 100644 --- a/compute/disks/createComputeHyperdiskFromPool.js +++ b/compute/disks/createComputeHyperdiskFromPool.js @@ -35,9 +35,9 @@ async function main() { // The zone where your VM and new disk are located. const zone = 'europe-central2-b'; // The name of the new disk - const diskName = 'disk-name-from-pool'; + const diskName = 'disk-from-pool-name'; // The name of the storage pool - const storagePoolName = 'storage-pool-name-hyperdisk'; + const storagePoolName = 'storage-pool-name'; // Link to the storagePool you want to use. Use format: // https://www.googleapis.com/compute/v1/projects/{projectId}/zones/{zone}/storagePools/{storagePoolName} const storagePool = `https://www.googleapis.com/compute/v1/projects/${projectId}/zones/${zone}/storagePools/${storagePoolName}`; diff --git a/compute/disks/createComputeHyperdiskPool.js b/compute/disks/createComputeHyperdiskPool.js index d8ae457e6b..d1ce4e6549 100644 --- a/compute/disks/createComputeHyperdiskPool.js +++ b/compute/disks/createComputeHyperdiskPool.js @@ -33,7 +33,7 @@ async function main() { // Project ID or project number of the Google Cloud project you want to use. const projectId = await storagePoolClient.getProjectId(); // Name of the zone in which you want to create the storagePool. - const zone = 'us-central1-a'; + const zone = 'europe-central2-b'; // Name of the storagePool you want to create. const storagePoolName = 'storage-pool-name'; // The type of disk you want to create. This value uses the following format: diff --git a/compute/test/createComputeHyperdiskFromPool.test.js b/compute/test/createComputeHyperdiskFromPool.test.js index e210e66c35..1a7130cafe 100644 --- a/compute/test/createComputeHyperdiskFromPool.test.js +++ b/compute/test/createComputeHyperdiskFromPool.test.js @@ -20,17 +20,56 @@ const path = require('path'); const {assert} = require('chai'); const {after, before, describe, it} = require('mocha'); const cp = require('child_process'); -const {DisksClient, StoragePoolsClient} = require('@google-cloud/compute').v1; +const {DisksClient, StoragePoolsClient, ZoneOperationsClient} = + require('@google-cloud/compute').v1; const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); const cwd = path.join(__dirname, '..'); +async function cleanupResources(projectId, zone, diskName, storagePoolName) { + const disksClient = new DisksClient(); + const storagePoolsClient = new StoragePoolsClient(); + const zoneOperationsClient = new ZoneOperationsClient(); + // Delete disk attached to storagePool + const [diskResponse] = await disksClient.delete({ + project: projectId, + disk: diskName, + zone, + }); + + let diskOperation = diskResponse.latestResponse; + + // Wait for the delete disk operation to complete. 
+ while (diskOperation.status !== 'DONE') { + [diskOperation] = await zoneOperationsClient.wait({ + operation: diskOperation.name, + project: projectId, + zone: diskOperation.zone.split('/').pop(), + }); + } + + const [poolResponse] = await storagePoolsClient.delete({ + project: projectId, + storagePool: storagePoolName, + zone, + }); + let poolOperation = poolResponse.latestResponse; + + // Wait for the delete pool operation to complete. + while (poolOperation.status !== 'DONE') { + [poolOperation] = await zoneOperationsClient.wait({ + operation: poolOperation.name, + project: projectId, + zone: poolOperation.zone.split('/').pop(), + }); + } +} + describe('Create compute hyperdisk from pool', async () => { - const diskName = 'disk-name-from-pool'; + const diskName = 'disk-from-pool-name'; const zone = 'europe-central2-b'; - const storagePoolName = 'storage-pool-name-hyperdisk'; + const storagePoolName = 'storage-pool-name'; const disksClient = new DisksClient(); - const storagePoolsClient = new StoragePoolsClient(); let projectId; before(async () => { @@ -38,78 +77,25 @@ describe('Create compute hyperdisk from pool', async () => { // Ensure resources are deleted before attempting to recreate them try { - await disksClient.delete({ - project: projectId, - disk: diskName, - zone, - }); - } catch (err) { - // Should be ok to ignore (resource doesn't exist) - console.error(err); - } - - try { - await storagePoolsClient.delete({ - project: projectId, - storagePool: storagePoolName, - zone, - }); + await cleanupResources(projectId, zone, diskName, storagePoolName); } catch (err) { - // Should be ok to ignore (resource doesn't exist) + // Should be ok to ignore (resources do not exist) console.error(err); } - - await storagePoolsClient.insert({ - project: projectId, - storagePoolResource: { - name: storagePoolName, - poolProvisionedCapacityGb: 10240, - poolProvisionedIops: 10000, - poolProvisionedThroughput: 1024, - storagePoolType: `projects/${projectId}/zones/${zone}/storagePoolTypes/hyperdisk-balanced`, - capacityProvisioningType: 'advanced', - zone, - }, - zone, - }); }); after(async () => { - // Trying to delete the disk too quickly seems to fail - const deleteDisk = async () => { - setTimeout(async () => { - await disksClient.delete({ - project: projectId, - disk: diskName, - zone, - }); - }, 120 * 1000); // wait two minutes - }; - - try { - await deleteDisk(); - } catch { - // Try one more time after repeating the delay - await deleteDisk(); - } + await cleanupResources(projectId, zone, diskName, storagePoolName); + }); - // Need enough time after removing the disk before removing the pool - const deletePool = async () => { - setTimeout(async () => { - await storagePoolsClient.delete({ - project: projectId, - storagePool: storagePoolName, - zone, - }); - }, 120 * 1000); // wait two minutes - }; + it('should create a new storage pool', () => { + const response = JSON.parse( + execSync('node ./disks/createComputeHyperdiskPool.js', { + cwd, + }) + ); - try { - await deletePool(); - } catch { - // Try one more time after repeating the delay - await deletePool(); - } + assert.equal(response.name, storagePoolName); }); it('should create a new hyperdisk from pool', () => { diff --git a/compute/test/createComputeHyperdiskPool.test.js b/compute/test/createComputeHyperdiskPool.test.js deleted file mode 100644 index f3d2cfb441..0000000000 --- a/compute/test/createComputeHyperdiskPool.test.js +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -'use strict'; - -const path = require('path'); -const assert = require('node:assert/strict'); -const {after, before, describe, it} = require('mocha'); -const cp = require('child_process'); -const {StoragePoolsClient} = require('@google-cloud/compute').v1; - -const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); -const cwd = path.join(__dirname, '..'); - -describe('Create compute hyperdisk pool', async () => { - const storagePoolName = 'storage-pool-name'; - const zone = 'us-central1-a'; - const storagePoolsClient = new StoragePoolsClient(); - let projectId; - - before(async () => { - projectId = await storagePoolsClient.getProjectId(); - try { - // Ensure resource is deleted attempting to recreate it - await storagePoolsClient.delete({ - project: projectId, - storagePool: storagePoolName, - zone, - }); - } catch { - // ok to ignore (resource doesn't exist) - } - }); - - after(async () => { - await storagePoolsClient.delete({ - project: projectId, - storagePool: storagePoolName, - zone, - }); - }); - - it('should create a new storage pool', () => { - const response = JSON.parse( - execSync('node ./disks/createComputeHyperdiskPool.js', { - cwd, - }) - ); - - assert.equal(response.name, storagePoolName); - }); -}); From 03987a492eb0d2963d3fa096c6f48e26e70f55f1 Mon Sep 17 00:00:00 2001 From: Brian Dorsey Date: Tue, 3 Sep 2024 20:04:16 +0000 Subject: [PATCH 2/4] disable test log reporting to flaky-bot --- .github/workflows/flakybot.yaml | 25 ++----------------------- .kokoro/build-with-appengine.sh | 13 ------------- .kokoro/build-with-run.sh | 11 ----------- .kokoro/build.sh | 16 ---------------- 4 files changed, 2 insertions(+), 63 deletions(-) diff --git a/.github/workflows/flakybot.yaml b/.github/workflows/flakybot.yaml index ee25e21394..a70cb87bba 100644 --- a/.github/workflows/flakybot.yaml +++ b/.github/workflows/flakybot.yaml @@ -28,26 +28,5 @@ jobs: contents: 'read' id-token: 'write' steps: - - name: authenticate - uses: 'google-github-actions/auth@62cf5bd3e4211a0a0b51f2c6d6a37129d828611d' # v2 - with: - workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider' - service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com' - create_credentials_file: 'true' - access_token_lifetime: 600s - - name: download test results - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4 - with: - name: test-results - - name: download FlakyBot - run: | - curl -s -L https://github.com/googleapis/repo-automation-bots/archive/refs/tags/flakybot-v${{ env.FLAKYBOT_VERSION }}.tar.gz -o flakybot.tar.gz - tar xzf flakybot.tar.gz - cp -rT repo-automation-bots-flakybot-v${{ env.FLAKYBOT_VERSION}}/packages/flakybot/ . 
- - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 - with: - cache: true - cache-dependency-path: '${{ github.workspace }}/go.sum' - go-version-file: '${{ github.workspace }}/go.mod' - - name: run FlakyBot - run: go run flakybot.go --repo GoogleCloudPlatform/nodejs-docs-samples --commit_hash ${{github.sha}} --build_url https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} + - name: DISABLED run FlakyBot + run: echo flakybot error reporting disabled \ No newline at end of file diff --git a/.kokoro/build-with-appengine.sh b/.kokoro/build-with-appengine.sh index 2d12bbf62a..c7c452c5c5 100755 --- a/.kokoro/build-with-appengine.sh +++ b/.kokoro/build-with-appengine.sh @@ -61,19 +61,6 @@ trap cleanup EXIT HUP # Install dependencies and run tests npm install -# If tests are running against main, configure FlakyBot -# to open issues on failures: -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then - export MOCHA_REPORTER_SUITENAME=${PROJECT} - notify_flakybot() { - # Call the original trap function. - cleanup - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - } - trap notify_flakybot EXIT HUP -fi - npm test exit $? diff --git a/.kokoro/build-with-run.sh b/.kokoro/build-with-run.sh index 989989ff06..94040df99d 100755 --- a/.kokoro/build-with-run.sh +++ b/.kokoro/build-with-run.sh @@ -118,17 +118,6 @@ export SERVICE_NAME="${SAMPLE_NAME}-${SUFFIX}" export NODE_ENV=development npm install -# If tests are running against main, configure FlakyBot -# to open issues on failures: -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then - export MOCHA_REPORTER_SUITENAME=${PROJECT} - notify_flakybot() { - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - } - trap notify_flakybot EXIT HUP -fi - # Configure Cloud SQL variables for deploying idp-sql sample export DB_NAME="kokoro_ci" export DB_USER="kokoro_ci" diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 3e6de7cb29..e36f615e39 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -139,22 +139,6 @@ print_logfile() { echo '----- End ${MOCHA_REPORTER_OUTPUT} -----' } -# If tests are running against main, configure FlakyBot -# to open issues on failures: -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then - export MOCHA_REPORTER_SUITENAME=${PROJECT} - cleanup() { - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - - # We can only set one trap per signal, so run `print_logfile` here - print_logfile - } - trap cleanup EXIT HUP -else - trap print_logfile EXIT HUP -fi - npm test exit $? From b01e8440216d7a0d21d56f7a4a157bb2ad4ea1d7 Mon Sep 17 00:00:00 2001 From: Brian Dorsey Date: Tue, 3 Sep 2024 20:15:20 +0000 Subject: [PATCH 3/4] Revert "disable test log reporting to flaky-bot" This reverts commit 03987a492eb0d2963d3fa096c6f48e26e70f55f1. 
--- .github/workflows/flakybot.yaml | 25 +++++++++++++++++++++++-- .kokoro/build-with-appengine.sh | 13 +++++++++++++ .kokoro/build-with-run.sh | 11 +++++++++++ .kokoro/build.sh | 16 ++++++++++++++++ 4 files changed, 63 insertions(+), 2 deletions(-) diff --git a/.github/workflows/flakybot.yaml b/.github/workflows/flakybot.yaml index a70cb87bba..ee25e21394 100644 --- a/.github/workflows/flakybot.yaml +++ b/.github/workflows/flakybot.yaml @@ -28,5 +28,26 @@ jobs: contents: 'read' id-token: 'write' steps: - - name: DISABLED run FlakyBot - run: echo flakybot error reporting disabled \ No newline at end of file + - name: authenticate + uses: 'google-github-actions/auth@62cf5bd3e4211a0a0b51f2c6d6a37129d828611d' # v2 + with: + workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider' + service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com' + create_credentials_file: 'true' + access_token_lifetime: 600s + - name: download test results + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4 + with: + name: test-results + - name: download FlakyBot + run: | + curl -s -L https://github.com/googleapis/repo-automation-bots/archive/refs/tags/flakybot-v${{ env.FLAKYBOT_VERSION }}.tar.gz -o flakybot.tar.gz + tar xzf flakybot.tar.gz + cp -rT repo-automation-bots-flakybot-v${{ env.FLAKYBOT_VERSION}}/packages/flakybot/ . + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 + with: + cache: true + cache-dependency-path: '${{ github.workspace }}/go.sum' + go-version-file: '${{ github.workspace }}/go.mod' + - name: run FlakyBot + run: go run flakybot.go --repo GoogleCloudPlatform/nodejs-docs-samples --commit_hash ${{github.sha}} --build_url https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} diff --git a/.kokoro/build-with-appengine.sh b/.kokoro/build-with-appengine.sh index c7c452c5c5..2d12bbf62a 100755 --- a/.kokoro/build-with-appengine.sh +++ b/.kokoro/build-with-appengine.sh @@ -61,6 +61,19 @@ trap cleanup EXIT HUP # Install dependencies and run tests npm install +# If tests are running against main, configure FlakyBot +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then + export MOCHA_REPORTER_SUITENAME=${PROJECT} + notify_flakybot() { + # Call the original trap function. + cleanup + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap notify_flakybot EXIT HUP +fi + npm test exit $? 
diff --git a/.kokoro/build-with-run.sh b/.kokoro/build-with-run.sh index 94040df99d..989989ff06 100755 --- a/.kokoro/build-with-run.sh +++ b/.kokoro/build-with-run.sh @@ -118,6 +118,17 @@ export SERVICE_NAME="${SAMPLE_NAME}-${SUFFIX}" export NODE_ENV=development npm install +# If tests are running against main, configure FlakyBot +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then + export MOCHA_REPORTER_SUITENAME=${PROJECT} + notify_flakybot() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap notify_flakybot EXIT HUP +fi + # Configure Cloud SQL variables for deploying idp-sql sample export DB_NAME="kokoro_ci" export DB_USER="kokoro_ci" diff --git a/.kokoro/build.sh b/.kokoro/build.sh index e36f615e39..3e6de7cb29 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -139,6 +139,22 @@ print_logfile() { echo '----- End ${MOCHA_REPORTER_OUTPUT} -----' } +# If tests are running against main, configure FlakyBot +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"release"* ]]; then + export MOCHA_REPORTER_SUITENAME=${PROJECT} + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + + # We can only set one trap per signal, so run `print_logfile` here + print_logfile + } + trap cleanup EXIT HUP +else + trap print_logfile EXIT HUP +fi + npm test exit $? From 2a47751e99ee9b9b835fe6820a24984dfcdfaf08 Mon Sep 17 00:00:00 2001 From: Joanna Grycz <37943406+gryczj@users.noreply.github.com> Date: Wed, 4 Sep 2024 16:29:13 +0200 Subject: [PATCH 4/4] feat: compute reservation create (#3807) * Changed debian-10 -> debian-11 * feat: compute_reservation_create --- .../disks/createComputeHyperdiskFromPool.js | 2 +- compute/disks/createComputeHyperdiskPool.js | 2 +- .../createReservationFromProperties.js | 117 ++++++++++++++++++ .../createComputeHyperdiskFromPool.test.js | 2 +- .../createReservationFromProperties.test.js | 83 +++++++++++++ 5 files changed, 203 insertions(+), 3 deletions(-) create mode 100644 compute/reservations/createReservationFromProperties.js create mode 100644 compute/test/createReservationFromProperties.test.js diff --git a/compute/disks/createComputeHyperdiskFromPool.js b/compute/disks/createComputeHyperdiskFromPool.js index 32c09214f4..3a2c34d09d 100644 --- a/compute/disks/createComputeHyperdiskFromPool.js +++ b/compute/disks/createComputeHyperdiskFromPool.js @@ -33,7 +33,7 @@ async function main() { // Project ID or project number of the Google Cloud project you want to use. const projectId = await disksClient.getProjectId(); // The zone where your VM and new disk are located. - const zone = 'europe-central2-b'; + const zone = 'us-central1-a'; // The name of the new disk const diskName = 'disk-from-pool-name'; // The name of the storage pool diff --git a/compute/disks/createComputeHyperdiskPool.js b/compute/disks/createComputeHyperdiskPool.js index d1ce4e6549..d8ae457e6b 100644 --- a/compute/disks/createComputeHyperdiskPool.js +++ b/compute/disks/createComputeHyperdiskPool.js @@ -33,7 +33,7 @@ async function main() { // Project ID or project number of the Google Cloud project you want to use. const projectId = await storagePoolClient.getProjectId(); // Name of the zone in which you want to create the storagePool. - const zone = 'europe-central2-b'; + const zone = 'us-central1-a'; // Name of the storagePool you want to create. const storagePoolName = 'storage-pool-name'; // The type of disk you want to create. 
This value uses the following format:
diff --git a/compute/reservations/createReservationFromProperties.js b/compute/reservations/createReservationFromProperties.js
new file mode 100644
index 0000000000..a21bdccee6
--- /dev/null
+++ b/compute/reservations/createReservationFromProperties.js
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+async function main() {
+  // [START compute_reservation_create]
+  // Import the Compute library
+  const computeLib = require('@google-cloud/compute');
+  const compute = computeLib.protos.google.cloud.compute.v1;
+
+  // Instantiate a reservationsClient
+  const reservationsClient = new computeLib.ReservationsClient();
+  // Instantiate a zoneOperationsClient
+  const zoneOperationsClient = new computeLib.ZoneOperationsClient();
+
+  /**
+   * TODO(developer): Update these variables before running the sample.
+   */
+  // The ID of the project where you want to reserve resources.
+  const projectId = await reservationsClient.getProjectId();
+  // The zone in which to reserve resources.
+  const zone = 'us-central1-a';
+  // The name of the reservation to create.
+  const reservationName = 'reservation-01';
+  // The number of VMs to reserve.
+  const vmsNumber = 3;
+  // Machine type to use for each VM.
+  const machineType = 'n1-standard-4';
+
+  async function callCreateComputeReservationFromProperties() {
+    // Create a specific reservation for 3 VMs that each use an N1 predefined machine type with 4 vCPUs.
+    const specificReservation = new compute.AllocationSpecificSKUReservation({
+      count: vmsNumber,
+      instanceProperties: {
+        machineType,
+        // Have the reserved VMs use a specific minimum CPU platform instead of the zone's default CPU platform.
+        minCpuPlatform: 'Intel Skylake',
+        // If you want to attach GPUs to your reserved N1 VMs, update guestAccelerators as needed.
+        guestAccelerators: [
+          {
+            // The number of GPUs to add per reserved VM.
+            acceleratorCount: 1,
+            // Supported GPU model for N1 VMs. Ensure that your chosen GPU model is available in the zone
+            // where you want to reserve resources.
+            acceleratorType: 'nvidia-tesla-t4',
+          },
+        ],
+        // If you want to add Local SSD disks to each reserved VM, update localSsds as needed.
+        // You can specify up to 24 Local SSD disks. Each Local SSD disk is 375 GB.
+        localSsds: [
+          {
+            diskSizeGb: 375,
+            // The type of interface you want each Local SSD disk to use. Specify one of the following values: NVME or SCSI.
+            // Make sure that the machine type you specify for the reserved VMs supports the chosen disk interfaces.
+            interface: 'NVME',
+          },
+        ],
+      },
+    });
+
+    // Create a reservation.
+ const reservation = new compute.Reservation({ + name: reservationName, + zone, + specificReservation, + }); + + const [response] = await reservationsClient.insert({ + project: projectId, + reservationResource: reservation, + zone, + }); + + let operation = response.latestResponse; + + // Wait for the create reservation operation to complete. + while (operation.status !== 'DONE') { + [operation] = await zoneOperationsClient.wait({ + operation: operation.name, + project: projectId, + zone: operation.zone.split('/').pop(), + }); + } + + const createdReservation = ( + await reservationsClient.get({ + project: projectId, + zone, + reservation: reservationName, + }) + )[0]; + + console.log(JSON.stringify(createdReservation)); + } + + await callCreateComputeReservationFromProperties(); + // [END compute_reservation_create] +} + +main().catch(err => { + console.error(err); + process.exitCode = 1; +}); diff --git a/compute/test/createComputeHyperdiskFromPool.test.js b/compute/test/createComputeHyperdiskFromPool.test.js index 1a7130cafe..5813a2158c 100644 --- a/compute/test/createComputeHyperdiskFromPool.test.js +++ b/compute/test/createComputeHyperdiskFromPool.test.js @@ -67,7 +67,7 @@ async function cleanupResources(projectId, zone, diskName, storagePoolName) { describe('Create compute hyperdisk from pool', async () => { const diskName = 'disk-from-pool-name'; - const zone = 'europe-central2-b'; + const zone = 'us-central1-a'; const storagePoolName = 'storage-pool-name'; const disksClient = new DisksClient(); let projectId; diff --git a/compute/test/createReservationFromProperties.test.js b/compute/test/createReservationFromProperties.test.js new file mode 100644 index 0000000000..9c7512bf7d --- /dev/null +++ b/compute/test/createReservationFromProperties.test.js @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+'use strict';
+
+const path = require('path');
+const assert = require('node:assert/strict');
+const {after, before, describe, it} = require('mocha');
+const cp = require('child_process');
+const {ReservationsClient} = require('@google-cloud/compute').v1;
+
+const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
+const cwd = path.join(__dirname, '..');
+
+describe('Create compute reservation by specifying properties directly', async () => {
+  const reservationName = 'reservation-01';
+  const zone = 'us-central1-a';
+  const reservationsClient = new ReservationsClient();
+  let projectId;
+
+  before(async () => {
+    projectId = await reservationsClient.getProjectId();
+  });
+
+  after(async () => {
+    await reservationsClient.delete({
+      project: projectId,
+      reservation: reservationName,
+      zone,
+    });
+  });
+
+  it('should create a new reservation', () => {
+    const instanceProperties = {
+      _machineType: 'machineType',
+      _minCpuPlatform: 'minCpuPlatform',
+      guestAccelerators: [
+        {
+          _acceleratorCount: 'acceleratorCount',
+          _acceleratorType: 'acceleratorType',
+          acceleratorCount: 1,
+          acceleratorType: 'nvidia-tesla-t4',
+        },
+      ],
+      localSsds: [
+        {
+          diskSizeGb: '375',
+          interface: 'NVME',
+          _diskSizeGb: 'diskSizeGb',
+          _interface: 'interface',
+        },
+      ],
+      machineType: 'n1-standard-4',
+      minCpuPlatform: 'Intel Skylake',
+    };
+
+    const response = JSON.parse(
+      execSync('node ./reservations/createReservationFromProperties.js', {
+        cwd,
+      })
+    );
+
+    assert.equal(response.name, reservationName);
+    assert.equal(response.specificReservation.count, '3');
+    assert.deepEqual(
+      response.specificReservation.instanceProperties,
+      instanceProperties
+    );
+  });
+});
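Note on the operation-polling pattern that recurs in these patches: each `insert`/`delete` call returns a response whose `latestResponse` is a long-running operation, and the samples and test cleanup loop on `ZoneOperationsClient.wait` until the operation reports `DONE`. A minimal standalone sketch of that shared pattern follows; the helper name `waitForZoneOperation` is illustrative and not part of the patch, and it assumes the same `@google-cloud/compute` v1 clients used above.

```js
// Sketch: poll a Compute Engine zonal long-running operation until it is DONE.
// Mirrors the loops in cleanupResources() and the reservation sample above.
const {ZoneOperationsClient} = require('@google-cloud/compute').v1;

async function waitForZoneOperation(projectId, operation) {
  const zoneOperationsClient = new ZoneOperationsClient();
  while (operation.status !== 'DONE') {
    [operation] = await zoneOperationsClient.wait({
      operation: operation.name,
      project: projectId,
      // operation.zone is a full resource URL; wait() expects the bare zone name.
      zone: operation.zone.split('/').pop(),
    });
  }
  return operation;
}

// Example usage (hypothetical), after an insert/delete call:
//   const [response] = await reservationsClient.insert({...});
//   await waitForZoneOperation(projectId, response.latestResponse);
```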