This repository has been archived by the owner on Aug 7, 2023. It is now read-only.

add retries for getting resources, fix acceptance tests (#32)
zreigz authored Oct 9, 2020
1 parent e41e333 commit d1c6969
Showing 12 changed files with 301 additions and 131 deletions.
6 changes: 3 additions & 3 deletions Makefile
@@ -22,10 +22,10 @@ test: fmtcheck
testacc:
# Require following environment variables to be set:
# KUBERMATIC_TOKEN - access token
# KUBERMATIC_HOST - example https://dev.kubermatic.io
# KUBERMATIC_HOST - example https://kubermatic.io
# KUBERMATIC_ANOTHER_USER_EMAIL - email of an existing user to test cluster access sharing
# KUBERMATIC_K8S_VERSION_17 - exact k8s version with prefix `1.17.` that is supported
# KUBERMATIC_K8S_VERSION_16 - exact k8s version with prefix `1.16.` that is supported
# KUBERMATIC_K8S_VERSION - the kubernetes version
# KUBERMATIC_K8S_OLDER_VERSION - a kubernetes version lower than KUBERMATIC_K8S_VERSION
# KUBERMATIC_OPENSTACK_IMAGE - an image available for openstack clusters
# KUBERMATIC_OPENSTACK_IMAGE2 - another image available for openstack clusters
# KUBERMATIC_OPENSTACK_FLAVOR - openstack flavor to use
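For reference, supplying these variables and running the acceptance tests might look like the sketch below; every value is a placeholder, not taken from this commit:

    export KUBERMATIC_HOST=https://kubermatic.example.com
    export KUBERMATIC_TOKEN=<access token>
    export KUBERMATIC_ANOTHER_USER_EMAIL=teammate@example.com
    export KUBERMATIC_K8S_VERSION=1.17.9        # any supported version
    export KUBERMATIC_K8S_OLDER_VERSION=1.16.13 # must be lower than KUBERMATIC_K8S_VERSION
    export KUBERMATIC_OPENSTACK_IMAGE=<image name>
    export KUBERMATIC_OPENSTACK_IMAGE2=<another image name>
    export KUBERMATIC_OPENSTACK_FLAVOR=<flavor name>
    make testacc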
2 changes: 1 addition & 1 deletion kubermatic/provider.go
@@ -23,7 +23,7 @@ const (
// wait this time before starting resource checks
requestDelay = time.Second
// smallest time to wait before refreshes
retryTimeout = 2 * time.Second
retryTimeout = time.Second
)

type kubermaticProviderMeta struct {
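As context for the constants above: requestDelay and retryTimeout feed the Terraform SDK's polling helper, roughly as in this sketch (illustrative only; the state names, refreshFunc, and error message are placeholders, not from this commit):

    createStateConf := &resource.StateChangeConf{
        Pending:    []string{"Creating"}, // placeholder pending state
        Target:     []string{"Active"},   // placeholder target state
        Refresh:    refreshFunc,          // a resource.StateRefreshFunc defined elsewhere
        Timeout:    d.Timeout(schema.TimeoutCreate),
        Delay:      requestDelay, // wait this long before the first check
        MinTimeout: retryTimeout, // minimum wait between refreshes
    }
    if _, err := createStateConf.WaitForState(); err != nil {
        return fmt.Errorf("resource did not become ready: %v", err)
    }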
17 changes: 5 additions & 12 deletions kubermatic/provider_test.go
@@ -14,11 +14,10 @@ import (
const (
testNamePrefix = "tf-acc-test-"

testEnvOtherUserEmail = "KUBERMATIC_ANOTHER_USER_EMAIL"
testEnvExistingClusterID = "KUBERMATIC_EXISTING_CLUSTER_ID"
testEnvOtherUserEmail = "KUBERMATIC_ANOTHER_USER_EMAIL"

testEnvK8sVersion17 = "KUBERMATIC_K8S_VERSION_17"
testEnvK8sVersion16 = "KUBERMATIC_K8S_VERSION_16"
testEnvK8sVersion = "KUBERMATIC_K8S_VERSION"
testEnvK8sOlderVersion = "KUBERMATIC_K8S_OLDER_VERSION"

testEnvOpenstackNodeDC = "KUBERMATIC_OPENSTACK_NODE_DC"
testEnvOpenstackUsername = "KUBERMATIC_OPENSTACK_USERNAME"
@@ -70,12 +69,6 @@ func testAccPreCheckForOpenstack(t *testing.T) {
checkEnv(t, testEnvOpenstackFlavor)
}

func testAccPreCheckExistingCluster(t *testing.T) {
t.Helper()
testAccPreCheck(t)
checkEnv(t, testEnvExistingClusterID)
}

func testAccPreCheckForAzure(t *testing.T) {
t.Helper()
testAccPreCheck(t)
@@ -100,8 +93,8 @@ func testAccPreCheck(t *testing.T) {
t.Helper()
checkEnv(t, "KUBERMATIC_HOST")
checkEnv(t, "KUBERMATIC_TOKEN")
checkEnv(t, testEnvK8sVersion17)
checkEnv(t, testEnvK8sVersion16)
checkEnv(t, testEnvK8sVersion)
checkEnv(t, testEnvK8sOlderVersion)
}

func checkEnv(t *testing.T, n string) {
19 changes: 12 additions & 7 deletions kubermatic/resource_cluster.go
@@ -202,7 +202,11 @@ func resourceClusterCreate(d *schema.ResourceData, m interface{}) error {
return err
}

if err := waitClusterReady(k, d); err != nil {
projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
if err != nil {
return err
}
if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
return fmt.Errorf("cluster '%s' is not ready: %v", r.Payload.ID, err)
}

@@ -472,7 +476,11 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error {
d.SetPartial("sshkeys")
}

if err := waitClusterReady(k, d); err != nil {
projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
if err != nil {
return err
}
if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
return fmt.Errorf("cluster '%s' is not ready: %v", d.Id(), err)
}

@@ -572,12 +580,9 @@ func assignSSHKeysToCluster(projectID, seedDC, clusterID string, sshkeyIDs []str
return nil
}

func waitClusterReady(k *kubermaticProviderMeta, d *schema.ResourceData) error {
func waitClusterReady(k *kubermaticProviderMeta, d *schema.ResourceData, projectID, seedDC, clusterID string) error {
return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
if err != nil {
return resource.NonRetryableError(err)
}

p := project.NewGetClusterHealthParams()
p.SetProjectID(projectID)
p.SetDC(seedDC)
12 changes: 6 additions & 6 deletions kubermatic/resource_cluster_test.go
@@ -21,7 +21,7 @@ func TestAccKubermaticCluster_Openstack_Basic(t *testing.T) {
password := os.Getenv(testEnvOpenstackPassword)
tenant := os.Getenv(testEnvOpenstackTenant)
nodeDC := os.Getenv(testEnvOpenstackNodeDC)
versionK8s17 := os.Getenv(testEnvK8sVersion17)
versionK8s17 := os.Getenv(testEnvK8sVersion)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForOpenstack(t) },
Providers: testAccProviders,
@@ -214,8 +214,8 @@ func TestAccKubermaticCluster_Openstack_UpgradeVersion(t *testing.T) {
versionedConfig := func(version string) string {
return testAccCheckKubermaticClusterOpenstackBasic(testName, username, password, tenant, nodeDC, version)
}
versionK8s16 := os.Getenv(testEnvK8sVersion16)
versionK8s17 := os.Getenv(testEnvK8sVersion17)
versionK8s16 := os.Getenv(testEnvK8sOlderVersion)
versionK8s17 := os.Getenv(testEnvK8sVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForOpenstack(t) },
@@ -374,7 +374,7 @@ func TestAccKubermaticCluster_SSHKeys(t *testing.T) {
password := os.Getenv(testEnvOpenstackPassword)
tenant := os.Getenv(testEnvOpenstackTenant)
nodeDC := os.Getenv(testEnvOpenstackNodeDC)
k8sVersion17 := os.Getenv(testEnvK8sVersion17)
k8sVersion17 := os.Getenv(testEnvK8sVersion)

configClusterWithKey1 := testAccCheckKubermaticClusterOpenstackBasicWithSSHKey1(testName, username, password, tenant, nodeDC, k8sVersion17)
configClusterWithKey2 := testAccCheckKubermaticClusterOpenstackBasicWithSSHKey2(testName, username, password, tenant, nodeDC, k8sVersion17)
@@ -524,7 +524,7 @@ func TestAccKubermaticCluster_Azure_Basic(t *testing.T) {
tenantID := os.Getenv(testEnvAzureTenantID)
subsID := os.Getenv(testEnvAzureSubscriptionID)
nodeDC := os.Getenv(testEnvAzureNodeDC)
k8sVersion := os.Getenv(testEnvK8sVersion17)
k8sVersion := os.Getenv(testEnvK8sVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForAzure(t) },
@@ -578,7 +578,7 @@ func TestAccKubermaticCluster_AWS_Basic(t *testing.T) {
awsSecretAccessKey := os.Getenv(testAWSSecretAccessKey)
vpcID := os.Getenv(testEnvAWSVPCID)
nodeDC := os.Getenv(testEnvAWSNodeDC)
k8sVersion17 := os.Getenv(testEnvK8sVersion17)
k8sVersion17 := os.Getenv(testEnvK8sVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForAWS(t) },
23 changes: 10 additions & 13 deletions kubermatic/resource_node_deployment.go
@@ -75,11 +75,11 @@ func readNodeDeploymentPreservedValues(d *schema.ResourceData) *nodeSpecPreserve
}

func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {

projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Get("cluster_id").(string))
if err != nil {
return err
}

k := m.(*kubermaticProviderMeta)

p := project.NewCreateNodeDeploymentParams()
@@ -91,10 +91,14 @@ func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {
Spec: expandNodeDeploymentSpec(d.Get("spec").([]interface{})),
})

if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
return fmt.Errorf("cluster is not ready: %v", err)
}

r, err := k.client.Project.CreateNodeDeployment(p, k.auth)
if err != nil {
if e, ok := err.(*project.CreateNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
return fmt.Errorf("create node deployment: %s", errorMessage(e.Payload))
return fmt.Errorf("unable to create node deployment: %s", errorMessage(e.Payload))
}

return fmt.Errorf("unable to create a node deployment: %v", getErrorResponse(err))
Expand All @@ -106,6 +110,7 @@ func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {
}

return resourceNodeDeploymentRead(d, m)

}

func kubermaticNodeDeploymentMakeID(projectID, seedDC, clusterID, id string) string {
@@ -181,7 +186,7 @@ func resourceNodeDeploymentUpdate(d *schema.ResourceData, m interface{}) error {
r, err := k.client.Project.PatchNodeDeployment(p, k.auth)
if err != nil {
if e, ok := err.(*project.PatchNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
return fmt.Errorf(errorMessage(e.Payload))
return fmt.Errorf("unable to update a node deployment: %v", errorMessage(e.Payload))
}
return fmt.Errorf("unable to update a node deployment: %v", getErrorResponse(err))
}
Expand All @@ -194,7 +199,7 @@ func resourceNodeDeploymentUpdate(d *schema.ResourceData, m interface{}) error {
}

func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration, projectID, seedDC, clusterID, id string) error {
err := resource.Retry(timeout, func() *resource.RetryError {
return resource.Retry(timeout, func() *resource.RetryError {
p := project.NewGetNodeDeploymentParams()
p.SetProjectID(projectID)
p.SetClusterID(clusterID)
@@ -203,11 +208,7 @@ func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration,

r, err := k.client.Project.GetNodeDeployment(p, k.auth)
if err != nil {
if e, ok := err.(*project.GetNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
// Sometimes api returns 500 which often means try later
return resource.RetryableError(fmt.Errorf("unable to get node deployment '%s' status: %s: %v", id, errorMessage(e.Payload), err))
}
return resource.NonRetryableError(fmt.Errorf("unable to get node deployment '%s' status: %v", id, getErrorResponse(err)))
return resource.RetryableError(fmt.Errorf("unable to get node deployment %v", err))
}

if r.Payload.Status.ReadyReplicas < *r.Payload.Spec.Replicas {
Expand All @@ -216,10 +217,6 @@ func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration,
}
return nil
})
if err != nil {
return fmt.Errorf("node deployment '%s' is not ready: %v", id, err)
}
return nil
}

func resourceNodeDeploymentDelete(d *schema.ResourceData, m interface{}) error {
19 changes: 9 additions & 10 deletions kubermatic/resource_node_deployment_test.go
@@ -22,8 +22,8 @@ func TestAccKubermaticNodeDeployment_Openstack_Basic(t *testing.T) {
image := os.Getenv(testEnvOpenstackImage)
image2 := os.Getenv(testEnvOpenstackImage2)
flavor := os.Getenv(testEnvOpenstackFlavor)
k8sVersion17 := os.Getenv(testEnvK8sVersion17)
kubeletVersion16 := os.Getenv(testEnvK8sVersion16)
k8sVersion17 := os.Getenv(testEnvK8sVersion)
kubeletVersion16 := os.Getenv(testEnvK8sOlderVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForOpenstack(t) },
@@ -256,15 +256,15 @@ func TestAccKubermaticNodeDeployment_Azure_Basic(t *testing.T) {
subsID := os.Getenv(testEnvAzureSubscriptionID)
nodeDC := os.Getenv(testEnvAzureNodeDC)
nodeSize := os.Getenv(testEnvAzureNodeSize)
k8sVersion17 := os.Getenv(testEnvK8sVersion17)
k8sVersion17 := os.Getenv(testEnvK8sVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForAzure(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckKubermaticNodeDeploymentDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckKubermaticNodeDeploymentAzureBasic(testName, clientID, clientSecret, tenantID, subsID, nodeDC, nodeSize, k8sVersion17, k8sVersion17),
Config: testAccCheckKubermaticNodeDeploymentAzureBasic(testName, clientID, clientSecret, tenantID, subsID, nodeDC, nodeSize, k8sVersion17),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubermaticNodeDeploymentExists("kubermatic_node_deployment.acctest_nd", &nodedepl),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.azure.0.size", nodeSize),
Expand All @@ -274,7 +274,7 @@ func TestAccKubermaticNodeDeployment_Azure_Basic(t *testing.T) {
})
}

func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, tenantID, subscID, nodeDC, nodeSize, k8sVersion, kubeletVersion string) string {
func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, tenantID, subscID, nodeDC, nodeSize, k8sVersion string) string {
return fmt.Sprintf(`
resource "kubermatic_project" "acctest_project" {
name = "%s"
@@ -302,9 +302,8 @@ func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, t
cluster_id = kubermatic_cluster.acctest_cluster.id
name = "%s"
spec {
replicas = 2
replicas = 1
template {
dynamic_config = false
cloud {
azure {
size = "%s"
Expand All @@ -320,7 +319,7 @@ func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, t
}
}
}
}`, n, n, nodeDC, k8sVersion, clientID, clientSecret, tenantID, subscID, n, nodeSize, kubeletVersion)
}`, n, n, nodeDC, k8sVersion, clientID, clientSecret, tenantID, subscID, n, nodeSize, k8sVersion)
}

func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
@@ -335,7 +334,7 @@ func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
subnetID := os.Getenv(testEnvAWSSubnetID)
availabilityZone := os.Getenv(testEnvAWSAvailabilityZone)
diskSize := os.Getenv(testEnvAWSDiskSize)
k8sVersion17 := os.Getenv(testEnvK8sVersion17)
k8sVersion17 := os.Getenv(testEnvK8sVersion)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheckForAWS(t) },
Expand All @@ -348,7 +347,7 @@ func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
testAccCheckKubermaticNodeDeploymentExists("kubermatic_node_deployment.acctest_nd", &nodedepl),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.instance_type", instanceType),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.disk_size", diskSize),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.volume_type", "standart"),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.volume_type", "standard"),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.subnet_id", subnetID),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.availability_zone", availabilityZone),
resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.assign_public_ip", "true"),
59 changes: 44 additions & 15 deletions kubermatic/resource_project.go
@@ -3,6 +3,7 @@ package kubermatic
import (
"fmt"
"net/http"
"time"

"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
Expand All @@ -13,8 +14,10 @@ import (
)

const (
projectActive = "Active"
projectInactive = "Inactive"
projectActive = "Active"
projectInactive = "Inactive"
usersReady = "Ready"
usersUnavailable = "Unavailable"
)

func resourceProject() *schema.Resource {
@@ -115,16 +118,16 @@ func resourceProjectCreate(d *schema.ResourceData, m interface{}) error {
r, err := k.client.Project.GetProject(p.WithProjectID(id), k.auth)
if err != nil {
if e, ok := err.(*project.GetProjectDefault); ok && (e.Code() == http.StatusForbidden || e.Code() == http.StatusNotFound) {
return r, projectInactive, nil
return r, projectInactive, fmt.Errorf("project not ready: %v", err)
}
return nil, "", err
}
k.log.Debugf("creating project '%s', currently in '%s' state", id, r.Payload.Status)
return r, r.Payload.Status, nil
return r, projectActive, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
MinTimeout: retryTimeout,
Delay: requestDelay,
MinTimeout: 5 * retryTimeout,
Delay: 5 * requestDelay,
}

if _, err := createStateConf.WaitForState(); err != nil {
Expand Down Expand Up @@ -321,17 +324,43 @@ func kubermaticProjectCurrentUser(k *kubermaticProviderMeta) (*models.User, erro
}

func kubermaticProjectPersistedUsers(k *kubermaticProviderMeta, id string) (map[string]models.User, error) {
p := users.NewGetUsersForProjectParams()
p.SetProjectID(id)
r, err := k.client.Users.GetUsersForProject(p, k.auth)
if err != nil {
return nil, fmt.Errorf("get users for project errored: %v", err)
listStateConf := &resource.StateChangeConf{
Pending: []string{
usersUnavailable,
},
Target: []string{
usersReady,
},
Refresh: func() (interface{}, string, error) {
p := users.NewGetUsersForProjectParams()
p.SetProjectID(id)

r, err := k.client.Users.GetUsersForProject(p, k.auth)
if err != nil {
// wait for the RBACs
if _, ok := err.(*users.GetUsersForProjectForbidden); ok {
return r, usersUnavailable, nil
}
return nil, usersUnavailable, fmt.Errorf("get users for project error: %v", err)
}
ret := make(map[string]models.User)
for _, p := range r.Payload {
ret[p.Email] = *p
}
return ret, usersReady, nil
},
Timeout: 10 * time.Second,
Delay: 5 * requestDelay,
}
ret := make(map[string]models.User)
for _, p := range r.Payload {
ret[p.Email] = *p

rawUsers, err := listStateConf.WaitForState()
if err != nil {
k.log.Debugf("error while waiting for the users %v", err)
return nil, fmt.Errorf("error while waiting for the users %v", err)
}
return ret, nil
users := rawUsers.(map[string]models.User)

return users, nil
}

func kubermaticProjectConfiguredUsers(d *schema.ResourceData) map[string]models.User {