From 578ae4ae564bc2a48d1c6c700fe58ba0291310fa Mon Sep 17 00:00:00 2001
From: shiyuhang <1136742008@qq.com>
Date: Fri, 7 Jun 2024 15:02:38 +0800
Subject: [PATCH] opt

---
 .terraform.tfstate.lock.info          |  1 -
 docs/resources/cluster.md             |  4 ----
 internal/provider/cluster_resource.go | 33 +++++++++++++--------------
 internal/provider/provider.go         |  8 +++++--
 test/main.tf                          |  0
 5 files changed, 22 insertions(+), 24 deletions(-)
 delete mode 100644 .terraform.tfstate.lock.info
 delete mode 100644 test/main.tf

diff --git a/.terraform.tfstate.lock.info b/.terraform.tfstate.lock.info
deleted file mode 100644
index a7c351a..0000000
--- a/.terraform.tfstate.lock.info
+++ /dev/null
@@ -1 +0,0 @@
-{"ID":"aa74b862-ec62-d042-f334-24a7cfdbb239","Operation":"OperationTypeApply","Info":"","Who":"shiyuhang@shiyuhangdeMBP","Version":"1.7.5","Created":"2024-06-07T04:24:45.398722Z","Path":"terraform.tfstate"}
\ No newline at end of file
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index 913af9e..db680b0 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -77,10 +77,6 @@ resource "tidbcloud_cluster" "serverless_tier_cluster" {
 - `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).
 - `region` (String) the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).
 
-### Optional
-
-- `sync` (Boolean) Whether to wait for the cluster to be available when creating. Default is false.
-
 ### Read-Only
 
 - `create_timestamp` (String) The creation time of the cluster in Unix timestamp seconds (epoch time).
diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go
index 3750082..60f0c4b 100644
--- a/internal/provider/cluster_resource.go
+++ b/internal/provider/cluster_resource.go
@@ -41,10 +41,11 @@ const (
 )
 
 const (
-	clusterCreateTimeout  = time.Hour
-	clusterUpdateTimeout  = time.Hour
-	clusterCreateInterval = 60 * time.Second
-	clusterUpdateInterval = 30 * time.Second
+	clusterServerlessCreateTimeout = 180 * time.Second
+	clusterCreateTimeout           = time.Hour
+	clusterUpdateTimeout           = time.Hour
+	clusterCreateInterval          = 60 * time.Second
+	clusterUpdateInterval          = 20 * time.Second
 )
 
 type clusterResourceData struct {
@@ -435,13 +436,14 @@ func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest,
 		return
 	}
 	// set clusterId. other computed attributes are not returned by create, they will be set when refresh
-	data.ClusterId = types.StringValue(*createClusterResp.Payload.ID)
+	clusterId := *createClusterResp.Payload.ID
+	data.ClusterId = types.StringValue(clusterId)
 	if r.provider.sync {
-		tflog.Info(ctx, "wait cluster ready")
 		cluster := &clusterApi.GetClusterOKBody{}
 		if data.ClusterType == dev {
-			if err = retry.RetryContext(ctx, clusterCreateTimeout,
-				waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
+			tflog.Info(ctx, "wait serverless cluster ready")
+			if err = retry.RetryContext(ctx, clusterServerlessCreateTimeout,
+				waitClusterReadyFunc(ctx, data.ProjectId, clusterId, r.provider.client, cluster)); err != nil {
 				resp.Diagnostics.AddError(
 					"Cluster creation failed",
 					fmt.Sprintf("Cluster is not ready, get error: %s", err),
@@ -449,8 +451,9 @@ func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest,
 				return
 			}
 		} else {
+			tflog.Info(ctx, "wait dedicated cluster ready")
 			if err = RetryWithInterval(ctx, clusterCreateTimeout, clusterCreateInterval,
-				waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
+				waitClusterReadyFunc(ctx, data.ProjectId, clusterId, r.provider.client, cluster)); err != nil {
 				resp.Diagnostics.AddError(
 					"Cluster creation failed",
 					fmt.Sprintf("Cluster is not ready, get error: %s", err),
@@ -482,6 +485,7 @@ func waitClusterReadyFunc(ctx context.Context, projectId, clusterId string,
 		param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx)
 		getClusterResp, err := client.GetCluster(param)
 		if err != nil {
+			tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err))
 			if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError {
 				return retry.NonRetryableError(fmt.Errorf("error getting cluster: %s", err))
 			} else {
@@ -489,18 +493,13 @@ func waitClusterReadyFunc(ctx context.Context, projectId, clusterId string,
 			}
 		}
 		*cluster = *getClusterResp.Payload
-
-		tflog.Debug(ctx, fmt.Sprintf("cluster status: %s", cluster.Status.ClusterStatus))
 		switch cluster.Status.ClusterStatus {
-		case string(clusterStatusPaused), string(clusterStatusAvailable):
-			tflog.Info(ctx, "cluster is ready")
+		case string(clusterStatusPaused), string(clusterStatusAvailable), string(clusterStatusMaintaining):
 			return nil
-		case string(clusterStatusMaintaining):
-			return retry.NonRetryableError(fmt.Errorf("cluster is under maintaining"))
 		case string(clusterStatusUnavailable):
 			return retry.NonRetryableError(fmt.Errorf("cluster is unavailable"))
 		default:
-			tflog.Info(ctx, "cluster is not ready")
+			tflog.Info(ctx, fmt.Sprintf("cluster is not ready, status: %s", cluster.Status.ClusterStatus))
 			return retry.RetryableError(fmt.Errorf("cluster is not ready yet, got status: %s", cluster.Status.ClusterStatus))
 		}
 	}
@@ -846,7 +845,7 @@ func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest,
 		tflog.Info(ctx, "wait cluster ready")
 		cluster := &clusterApi.GetClusterOKBody{}
 		if err = RetryWithInterval(ctx, clusterUpdateTimeout, clusterUpdateInterval,
-			waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
+			waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.ValueString(), r.provider.client, cluster)); err != nil {
 			resp.Diagnostics.AddError(
 				"Cluster creation failed",
 				fmt.Sprintf("Cluster is not ready, get error: %s", err),
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 7a4b088..cd41cd2 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -129,8 +129,7 @@ func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.Configur
 	}
 
 	// sync
-	// p.sync = data.Sync.ValueBool()
-	p.sync = true
+	p.sync = data.Sync.ValueBool()
 	p.client = c
 	p.configured = true
 	resp.ResourceData = p
@@ -169,6 +168,11 @@ func (p *tidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest,
 				Optional:            true,
 				Sensitive:           true,
 			},
+			"sync": schema.BoolAttribute{
+				MarkdownDescription: "Whether to create the cluster synchronously",
+				Optional:            true,
+				Sensitive:           false,
+			},
 		},
 	}
 }
diff --git a/test/main.tf b/test/main.tf
deleted file mode 100644
index e69de29..0000000
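
Note: with this patch, waiting for a cluster to become available is controlled by the new provider-level `sync` attribute rather than the per-resource `sync` argument removed from docs/resources/cluster.md, and it is no longer hardcoded to true in Configure. A minimal sketch of the resulting provider configuration follows; only the `sync` attribute comes from the schema change above, the rest (credentials, how they are supplied) is illustrative and unchanged by this patch:

provider "tidbcloud" {
  # API keys omitted; configure them as before, this patch does not touch them.

  # Wait for clusters to reach an available state during create/update
  # instead of returning as soon as the API call succeeds.
  # Leaving this unset keeps the asynchronous behavior (defaults to false).
  sync = true
}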