Skip to content

Commit

Permalink
opt
Browse files Browse the repository at this point in the history
  • Loading branch information
shiyuhang0 committed Jun 7, 2024
1 parent 52fa26f commit 578ae4a
Show file tree
Hide file tree
Showing 5 changed files with 22 additions and 24 deletions.
1 change: 0 additions & 1 deletion .terraform.tfstate.lock.info

This file was deleted.

4 changes: 0 additions & 4 deletions docs/resources/cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,10 +77,6 @@ resource "tidbcloud_cluster" "serverless_tier_cluster" {
- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).
- `region` (String) the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).

### Optional

- `sync` (Boolean) Whether to wait for the cluster to be available when creating. Default is false.

### Read-Only

- `create_timestamp` (String) The creation time of the cluster in Unix timestamp seconds (epoch time).
Expand Down
33 changes: 16 additions & 17 deletions internal/provider/cluster_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,11 @@ const (
)

const (
clusterCreateTimeout = time.Hour
clusterUpdateTimeout = time.Hour
clusterCreateInterval = 60 * time.Second
clusterUpdateInterval = 30 * time.Second
clusterServerlessCreateTimeout = 180 * time.Second
clusterCreateTimeout = time.Hour
clusterUpdateTimeout = time.Hour
clusterCreateInterval = 60 * time.Second
clusterUpdateInterval = 20 * time.Second
)

type clusterResourceData struct {
Expand Down Expand Up @@ -435,22 +436,24 @@ func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest,
return
}
// set clusterId. other computed attributes are not returned by create, they will be set when refresh
data.ClusterId = types.StringValue(*createClusterResp.Payload.ID)
clusterId := *createClusterResp.Payload.ID
data.ClusterId = types.StringValue(clusterId)
if r.provider.sync {
tflog.Info(ctx, "wait cluster ready")
cluster := &clusterApi.GetClusterOKBody{}
if data.ClusterType == dev {
if err = retry.RetryContext(ctx, clusterCreateTimeout,
waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
tflog.Info(ctx, "wait serverless cluster ready")
if err = retry.RetryContext(ctx, clusterServerlessCreateTimeout,
waitClusterReadyFunc(ctx, data.ProjectId, clusterId, r.provider.client, cluster)); err != nil {
resp.Diagnostics.AddError(
"Cluster creation failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
)
return
}
} else {
tflog.Info(ctx, "wait dedicated cluster ready")
if err = RetryWithInterval(ctx, clusterCreateTimeout, clusterCreateInterval,
waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
waitClusterReadyFunc(ctx, data.ProjectId, clusterId, r.provider.client, cluster)); err != nil {
resp.Diagnostics.AddError(
"Cluster creation failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
Expand Down Expand Up @@ -482,25 +485,21 @@ func waitClusterReadyFunc(ctx context.Context, projectId, clusterId string,
param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx)
getClusterResp, err := client.GetCluster(param)
if err != nil {
tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err))
if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError {
return retry.NonRetryableError(fmt.Errorf("error getting cluster: %s", err))
} else {
return retry.RetryableError(fmt.Errorf("encountered a server error while reading cluster status - trying again"))
}
}
*cluster = *getClusterResp.Payload

tflog.Debug(ctx, fmt.Sprintf("cluster status: %s", cluster.Status.ClusterStatus))
switch cluster.Status.ClusterStatus {
case string(clusterStatusPaused), string(clusterStatusAvailable):
tflog.Info(ctx, "cluster is ready")
case string(clusterStatusPaused), string(clusterStatusAvailable), string(clusterStatusMaintaining):
return nil
case string(clusterStatusMaintaining):
return retry.NonRetryableError(fmt.Errorf("cluster is under maintaining"))
case string(clusterStatusUnavailable):
return retry.NonRetryableError(fmt.Errorf("cluster is unavailable"))
default:
tflog.Info(ctx, "cluster is not ready")
tflog.Info(ctx, fmt.Sprintf("cluster is not ready, status: %s", cluster.Status.ClusterStatus))
return retry.RetryableError(fmt.Errorf("cluster is not ready yet, got status: %s", cluster.Status.ClusterStatus))
}
}
Expand Down Expand Up @@ -846,7 +845,7 @@ func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest,
tflog.Info(ctx, "wait cluster ready")
cluster := &clusterApi.GetClusterOKBody{}
if err = RetryWithInterval(ctx, clusterUpdateTimeout, clusterUpdateInterval,
waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.String(), r.provider.client, cluster)); err != nil {
waitClusterReadyFunc(ctx, data.ProjectId, data.ClusterId.ValueString(), r.provider.client, cluster)); err != nil {
resp.Diagnostics.AddError(
"Cluster update failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
Expand Down
8 changes: 6 additions & 2 deletions internal/provider/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,8 +129,7 @@ func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.Configur
}

// sync
// p.sync = data.Sync.ValueBool()
p.sync = true
p.sync = data.Sync.ValueBool()
p.client = c
p.configured = true
resp.ResourceData = p
Expand Down Expand Up @@ -169,6 +168,11 @@ func (p *tidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest,
Optional: true,
Sensitive: true,
},
"sync": schema.BoolAttribute{
MarkdownDescription: "Whether to create the cluster synchronously",
Optional: true,
Sensitive: false,
},
},
}
}
Expand Down
Empty file removed test/main.tf
Empty file.

0 comments on commit 578ae4a

Please sign in to comment.