Skip to content

Commit

Permalink
add sync option for cluster create (#136)
Browse files Browse the repository at this point in the history
---------

Co-authored-by: Xiang Zhang <[email protected]>
  • Loading branch information
shiyuhang0 and zhangyangyu authored Jun 11, 2024
1 parent 2f7eb07 commit a66f4c5
Show file tree
Hide file tree
Showing 12 changed files with 235 additions and 42 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,4 @@ website/vendor

test.tf
.terraform.lock.hcl
.terraform.tfstate.lock.info
8 changes: 8 additions & 0 deletions docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,13 @@ provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
}
# To create or update the cluster resource synchronously, set sync to true
provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
sync = true
}
```

<!-- schema generated by tfplugindocs -->
Expand All @@ -40,3 +47,4 @@ provider "tidbcloud" {

- `private_key` (String, Sensitive) Private Key
- `public_key` (String, Sensitive) Public Key
- `sync` (Boolean) Whether to create or update the cluster resource synchronously
1 change: 1 addition & 0 deletions docs/resources/cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ terraform {
provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
sync = true
}
resource "tidbcloud_cluster" "dedicated_tier_cluster" {
Expand Down
7 changes: 7 additions & 0 deletions examples/provider/provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,10 @@ provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
}

# To create or update the cluster resource synchronously, set sync to true
provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
sync = true
}
1 change: 1 addition & 0 deletions examples/resources/tidbcloud_cluster/resource.tf
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ terraform {
provider "tidbcloud" {
public_key = "fake_public_key"
private_key = "fake_private_key"
sync = true
}

resource "tidbcloud_cluster" "dedicated_tier_cluster" {
Expand Down
3 changes: 2 additions & 1 deletion examples/workflows/tidbcloud_dedicated_tier/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ terraform {

provider "tidbcloud" {
# export TIDBCLOUD_PUBLIC_KEY and TIDBCLOUD_PRIVATE_KEY with the TiDB Cloud API Key
sync = true
}

data "tidbcloud_projects" "projects" {
Expand All @@ -94,4 +95,4 @@ resource "tidbcloud_cluster" "example" {

output "connection_strings" {
value = lookup(tidbcloud_cluster.example.status, "connection_strings")
}
}
3 changes: 2 additions & 1 deletion examples/workflows/tidbcloud_serverless_tier/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ terraform {

provider "tidbcloud" {
# export TIDBCLOUD_PUBLIC_KEY and TIDBCLOUD_PRIVATE_KEY with the TiDB Cloud API Key
sync = true
}

data "tidbcloud_projects" "projects" {
Expand All @@ -43,4 +44,4 @@ resource "tidbcloud_cluster" "example" {

output "connection_strings" {
value = lookup(tidbcloud_cluster.example.status, "connection_strings")
}
}
56 changes: 56 additions & 0 deletions examples/workflows/tidbcloud_serverless_using_project_name/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# Example: create a serverless (DEVELOPER) cluster, resolving the project by
# name instead of by numeric project ID.

# Name of an existing TiDB Cloud project to create the cluster in.
variable "project_name" {
  type     = string
  nullable = false
  default  = "default_project"
}

# Name for the new cluster.
variable "cluster_name" {
  type     = string
  nullable = false
}

# Cloud provider region the cluster is created in.
variable "cloud_provider_region" {
  type     = string
  nullable = false
  default  = "us-east-1"
}

# Root password for the cluster; marked sensitive so Terraform redacts it in output.
variable "password" {
  type      = string
  nullable  = false
  sensitive = true
}

terraform {
  required_providers {
    tidbcloud = {
      source = "tidbcloud/tidbcloud"
    }
  }
}

provider "tidbcloud" {
  # export TIDBCLOUD_PUBLIC_KEY and TIDBCLOUD_PRIVATE_KEY with the TiDB Cloud API Key
  sync = true
}

# Fetch projects visible to the API key (up to 50) so we can match by name.
data "tidbcloud_projects" "projects" {
  page_size = "50"
}

locals {
  # Resolve the project name to its ID; element() errors if no project matches.
  project_id = {
    value = element([for s in data.tidbcloud_projects.projects.items : s.id if s.name == var.project_name], 0)
  }
}

resource "tidbcloud_cluster" "example" {
  project_id     = local.project_id.value
  name           = var.cluster_name
  cluster_type   = "DEVELOPER"
  cloud_provider = "AWS"
  region         = var.cloud_provider_region
  config = {
    root_password = var.password
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
cluster_name = "cluster0"
password = "12345678"
project_name = "project_name"
150 changes: 132 additions & 18 deletions internal/provider/cluster_resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@ package provider
import (
"context"
"fmt"
"net/http"
"sort"
"strings"
"time"

clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster"
"github.com/hashicorp/terraform-plugin-framework/path"
Expand All @@ -16,11 +18,37 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
"github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud"
)

// Cluster tier identifiers used by the TiDB Cloud API: "DEVELOPER" is the
// serverless tier, "DEDICATED" is the dedicated tier.
const dev = "DEVELOPER"
const ded = "DEDICATED"

// clusterStatus mirrors the cluster status strings returned by the TiDB Cloud API.
// Enum: [AVAILABLE CREATING MODIFYING PAUSED RESUMING UNAVAILABLE IMPORTING MAINTAINING PAUSING]
type clusterStatus string

const (
	clusterStatusCreating    clusterStatus = "CREATING"
	clusterStatusAvailable   clusterStatus = "AVAILABLE"
	clusterStatusModifying   clusterStatus = "MODIFYING"
	clusterStatusPaused      clusterStatus = "PAUSED"
	clusterStatusResuming    clusterStatus = "RESUMING"
	clusterStatusUnavailable clusterStatus = "UNAVAILABLE"
	clusterStatusImporting   clusterStatus = "IMPORTING"
	clusterStatusMaintaining clusterStatus = "MAINTAINING"
	clusterStatusPausing     clusterStatus = "PAUSING"
)

// Polling budgets for synchronous (sync = true) create/update operations.
// Serverless (DEVELOPER) clusters provision quickly, so they get a short
// timeout with a tight poll interval; dedicated clusters can take far longer,
// so they poll less often over a longer window.
const (
	clusterServerlessCreateTimeout  = 180 * time.Second
	clusterServerlessCreateInterval = 2 * time.Second
	clusterCreateTimeout            = time.Hour
	clusterCreateInterval           = 60 * time.Second
	clusterUpdateTimeout            = time.Hour
	clusterUpdateInterval           = 20 * time.Second
)

type clusterResourceData struct {
ClusterId types.String `tfsdk:"id"`
ProjectId string `tfsdk:"project_id"`
Expand Down Expand Up @@ -409,17 +437,43 @@ func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest,
return
}
// set clusterId. other computed attributes are not returned by create, they will be set when refresh
data.ClusterId = types.StringValue(*createClusterResp.Payload.ID)

// we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic
tflog.Trace(ctx, "read cluster_resource")
getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())
getClusterResp, err := r.provider.client.GetCluster(getClusterParams)
if err != nil {
resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err))
return
clusterId := *createClusterResp.Payload.ID
data.ClusterId = types.StringValue(clusterId)
if r.provider.sync {
var cluster *clusterApi.GetClusterOKBody
if data.ClusterType == dev {
tflog.Info(ctx, "wait serverless cluster ready")
cluster, err = WaitClusterReady(ctx, clusterServerlessCreateTimeout, clusterServerlessCreateInterval, data.ProjectId, clusterId, r.provider.client)
if err != nil {
resp.Diagnostics.AddError(
"Cluster creation failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
)
return
}
} else {
tflog.Info(ctx, "wait dedicated cluster ready")
cluster, err = WaitClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, data.ProjectId, clusterId, r.provider.client)
if err != nil {
resp.Diagnostics.AddError(
"Cluster creation failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
)
return
}
}
refreshClusterResourceData(ctx, cluster, &data)
} else {
// we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic
tflog.Trace(ctx, "read cluster_resource")
getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())
getClusterResp, err := r.provider.client.GetCluster(getClusterParams)
if err != nil {
resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err))
return
}
refreshClusterResourceData(ctx, getClusterResp.Payload, &data)
}
refreshClusterResourceData(ctx, getClusterResp.Payload, &data)

// save into the Terraform state.
diags = resp.State.Set(ctx, &data)
Expand Down Expand Up @@ -516,7 +570,6 @@ func (r clusterResource) Read(ctx context.Context, req resource.ReadRequest, res
data.Config.RootPassword = rootPassword
data.Config.IPAccessList = iPAccessList
data.Config.Paused = paused

refreshClusterResourceData(ctx, getClusterResp.Payload, &data)

// save into the Terraform state
Expand Down Expand Up @@ -763,14 +816,27 @@ func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest,
return
}

// we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic
tflog.Trace(ctx, "read cluster_resource")
getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()))
if err != nil {
resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err))
return
if r.provider.sync {
tflog.Info(ctx, "wait cluster ready")
cluster, err := WaitClusterReady(ctx, clusterUpdateTimeout, clusterUpdateInterval, data.ProjectId, data.ClusterId.ValueString(), r.provider.client)
if err != nil {
resp.Diagnostics.AddError(
"Cluster update failed",
fmt.Sprintf("Cluster is not ready, get error: %s", err),
)
return
}
refreshClusterResourceData(ctx, cluster, &data)
} else {
// we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic
tflog.Trace(ctx, "read cluster_resource")
getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()))
if err != nil {
resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err))
return
}
refreshClusterResourceData(ctx, getClusterResp.Payload, &data)
}
refreshClusterResourceData(ctx, getClusterResp.Payload, &data)

// save into the Terraform state.
diags = resp.State.Set(ctx, &data)
Expand Down Expand Up @@ -809,3 +875,51 @@ func (r clusterResource) ImportState(ctx context.Context, req resource.ImportSta
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...)
}

// WaitClusterReady blocks until the cluster identified by projectId/clusterId
// leaves every transitional state and settles in a stable one (AVAILABLE,
// PAUSED or MAINTAINING), or until timeout elapses. The TiDB Cloud API is
// polled once per interval; on success the final cluster body is returned.
func WaitClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, projectId, clusterId string,
	client tidbcloud.TiDBCloudClient) (*clusterApi.GetClusterOKBody, error) {
	// States the cluster may pass through while converging.
	transitional := []string{
		string(clusterStatusCreating),
		string(clusterStatusModifying),
		string(clusterStatusResuming),
		string(clusterStatusUnavailable),
		string(clusterStatusImporting),
		string(clusterStatusPausing),
	}
	// States considered "ready" for the purposes of sync create/update.
	stable := []string{
		string(clusterStatusAvailable),
		string(clusterStatusPaused),
		string(clusterStatusMaintaining),
	}
	conf := retry.StateChangeConf{
		Pending:      transitional,
		Target:       stable,
		Timeout:      timeout,
		MinTimeout:   500 * time.Millisecond,
		PollInterval: interval,
		Refresh:      clusterStateRefreshFunc(ctx, projectId, clusterId, client),
	}

	raw, err := conf.WaitForStateContext(ctx)

	body, ok := raw.(*clusterApi.GetClusterOKBody)
	if !ok {
		return nil, err
	}
	return body, err
}

// clusterStateRefreshFunc builds a retry.StateRefreshFunc reporting the
// current status of the given cluster. Errors with a definitive (non-5xx)
// response abort the wait; anything else is treated as transient and is
// surfaced as "not found" so the retry machinery polls again.
func clusterStateRefreshFunc(ctx context.Context, projectId, clusterId string,
	client tidbcloud.TiDBCloudClient) retry.StateRefreshFunc {
	return func() (interface{}, string, error) {
		params := clusterApi.NewGetClusterParams().
			WithProjectID(projectId).
			WithClusterID(clusterId).
			WithContext(ctx)
		resp, err := client.GetCluster(params)
		if err == nil {
			return resp.Payload, resp.Payload.Status.ClusterStatus, nil
		}
		tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err))
		// A client-side (non-5xx) response means the error is permanent: stop.
		if resp != nil && resp.Code() < http.StatusInternalServerError {
			return nil, "", err
		}
		// regard as not found and retry again. Default is 20 times
		return nil, "", nil
	}
}
Loading

0 comments on commit a66f4c5

Please sign in to comment.