diff --git a/Makefile b/Makefile
index 33fd0ac..51fedb0 100644
--- a/Makefile
+++ b/Makefile
@@ -22,10 +22,10 @@ test: fmtcheck
 testacc:
 # Require following environment variables to be set:
 # KUBERMATIC_TOKEN - access token
-# KUBERMATIC_HOST - example https://dev.kubermatic.io
+# KUBERMATIC_HOST - example https://kubermatic.io
 # KUBERMATIC_ANOTHER_USER_EMAIL - email of an existing user to test cluster access sharing
-# KUBERMATIC_K8S_VERSION_17 - exact k8s version with prefix `1.17.` that is supported
-# KUBERMATIC_K8S_VERSION_16 - exact k8s version with prefix `1.16.` that is supported
+# KUBERMATIC_K8S_VERSION - the kubernetes version
+# KUBERMATIC_K8S_OLDER_VERSION - a lower kubernetes version than KUBERMATIC_K8S_VERSION
 # KUBERMATIC_OPENSTACK_IMAGE - an image available for openstack clusters
 # KUBERMATIC_OPENSTACK_IMAGE2 - another image available for openstack clusters
 # KUBERMATIC_OPENSTACK_FLAVOR - openstack flavor to use
diff --git a/kubermatic/provider.go b/kubermatic/provider.go
index e60fe15..17e07b4 100644
--- a/kubermatic/provider.go
+++ b/kubermatic/provider.go
@@ -23,7 +23,7 @@ const (
 	// wait this time before starting resource checks
 	requestDelay = time.Second
 	// smallest time to wait before refreshes
-	retryTimeout = 2 * time.Second
+	retryTimeout = time.Second
 )
 
 type kubermaticProviderMeta struct {
diff --git a/kubermatic/provider_test.go b/kubermatic/provider_test.go
index 4a91778..9a9e9c9 100644
--- a/kubermatic/provider_test.go
+++ b/kubermatic/provider_test.go
@@ -14,11 +14,10 @@ import (
 const (
 	testNamePrefix = "tf-acc-test-"
 
-	testEnvOtherUserEmail    = "KUBERMATIC_ANOTHER_USER_EMAIL"
-	testEnvExistingClusterID = "KUBERMATIC_EXISTING_CLUSTER_ID"
+	testEnvOtherUserEmail = "KUBERMATIC_ANOTHER_USER_EMAIL"
 
-	testEnvK8sVersion17 = "KUBERMATIC_K8S_VERSION_17"
-	testEnvK8sVersion16 = "KUBERMATIC_K8S_VERSION_16"
+	testEnvK8sVersion      = "KUBERMATIC_K8S_VERSION"
+	testEnvK8sOlderVersion = "KUBERMATIC_K8S_OLDER_VERSION"
 
 	testEnvOpenstackNodeDC   = "KUBERMATIC_OPENSTACK_NODE_DC"
 	testEnvOpenstackUsername = "KUBERMATIC_OPENSTACK_USERNAME"
@@ -70,12 +69,6 @@ func testAccPreCheckForOpenstack(t *testing.T) {
 	checkEnv(t, testEnvOpenstackFlavor)
 }
 
-func testAccPreCheckExistingCluster(t *testing.T) {
-	t.Helper()
-	testAccPreCheck(t)
-	checkEnv(t, testEnvExistingClusterID)
-}
-
 func testAccPreCheckForAzure(t *testing.T) {
 	t.Helper()
 	testAccPreCheck(t)
@@ -100,8 +93,8 @@ func testAccPreCheck(t *testing.T) {
 	t.Helper()
 	checkEnv(t, "KUBERMATIC_HOST")
 	checkEnv(t, "KUBERMATIC_TOKEN")
-	checkEnv(t, testEnvK8sVersion17)
-	checkEnv(t, testEnvK8sVersion16)
+	checkEnv(t, testEnvK8sVersion)
+	checkEnv(t, testEnvK8sOlderVersion)
 }
 
 func checkEnv(t *testing.T, n string) {
diff --git a/kubermatic/resource_cluster.go b/kubermatic/resource_cluster.go
index 8a1622b..20032ec 100644
--- a/kubermatic/resource_cluster.go
+++ b/kubermatic/resource_cluster.go
@@ -202,7 +202,11 @@ func resourceClusterCreate(d *schema.ResourceData, m interface{}) error {
 		return err
 	}
 
-	if err := waitClusterReady(k, d); err != nil {
+	projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
+	if err != nil {
+		return err
+	}
+	if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
 		return fmt.Errorf("cluster '%s' is not ready: %v", r.Payload.ID, err)
 	}
 
@@ -472,7 +476,11 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error {
 		d.SetPartial("sshkeys")
 	}
 
-	if err := waitClusterReady(k, d); err != nil {
+	projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
+	if err != nil {
+		return err
+	}
+	if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
 		return fmt.Errorf("cluster '%s' is not ready: %v", d.Id(), err)
 	}
 
@@ -572,12 +580,9 @@ func assignSSHKeysToCluster(projectID, seedDC, clusterID string, sshkeyIDs []str
 	return nil
 }
 
-func waitClusterReady(k *kubermaticProviderMeta, d *schema.ResourceData) error {
+func waitClusterReady(k *kubermaticProviderMeta, d *schema.ResourceData, projectID, seedDC, clusterID string) error {
 	return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
-		projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Id())
-		if err != nil {
-			return resource.NonRetryableError(err)
-		}
+
 		p := project.NewGetClusterHealthParams()
 		p.SetProjectID(projectID)
 		p.SetDC(seedDC)
diff --git a/kubermatic/resource_cluster_test.go b/kubermatic/resource_cluster_test.go
index e06c36d..521d764 100644
--- a/kubermatic/resource_cluster_test.go
+++ b/kubermatic/resource_cluster_test.go
@@ -21,7 +21,7 @@ func TestAccKubermaticCluster_Openstack_Basic(t *testing.T) {
 	password := os.Getenv(testEnvOpenstackPassword)
 	tenant := os.Getenv(testEnvOpenstackTenant)
 	nodeDC := os.Getenv(testEnvOpenstackNodeDC)
-	versionK8s17 := os.Getenv(testEnvK8sVersion17)
+	versionK8s17 := os.Getenv(testEnvK8sVersion)
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForOpenstack(t) },
 		Providers:    testAccProviders,
@@ -214,8 +214,8 @@ func TestAccKubermaticCluster_Openstack_UpgradeVersion(t *testing.T) {
 	versionedConfig := func(version string) string {
 		return testAccCheckKubermaticClusterOpenstackBasic(testName, username, password, tenant, nodeDC, version)
 	}
-	versionK8s16 := os.Getenv(testEnvK8sVersion16)
-	versionK8s17 := os.Getenv(testEnvK8sVersion17)
+	versionK8s16 := os.Getenv(testEnvK8sOlderVersion)
+	versionK8s17 := os.Getenv(testEnvK8sVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForOpenstack(t) },
@@ -374,7 +374,7 @@ func TestAccKubermaticCluster_SSHKeys(t *testing.T) {
 	password := os.Getenv(testEnvOpenstackPassword)
 	tenant := os.Getenv(testEnvOpenstackTenant)
 	nodeDC := os.Getenv(testEnvOpenstackNodeDC)
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
 
 	configClusterWithKey1 := testAccCheckKubermaticClusterOpenstackBasicWithSSHKey1(testName, username, password, tenant, nodeDC, k8sVersion17)
 	configClusterWithKey2 := testAccCheckKubermaticClusterOpenstackBasicWithSSHKey2(testName, username, password, tenant, nodeDC, k8sVersion17)
@@ -524,7 +524,7 @@ func TestAccKubermaticCluster_Azure_Basic(t *testing.T) {
 	tenantID := os.Getenv(testEnvAzureTenantID)
 	subsID := os.Getenv(testEnvAzureSubscriptionID)
 	nodeDC := os.Getenv(testEnvAzureNodeDC)
-	k8sVersion := os.Getenv(testEnvK8sVersion17)
+	k8sVersion := os.Getenv(testEnvK8sVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForAzure(t) },
@@ -578,7 +578,7 @@ func TestAccKubermaticCluster_AWS_Basic(t *testing.T) {
 	awsSecretAccessKey := os.Getenv(testAWSSecretAccessKey)
 	vpcID := os.Getenv(testEnvAWSVPCID)
 	nodeDC := os.Getenv(testEnvAWSNodeDC)
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForAWS(t) },
diff --git a/kubermatic/resource_node_deployment.go b/kubermatic/resource_node_deployment.go
index 782637a..fcd41e0 100644
--- a/kubermatic/resource_node_deployment.go
+++ b/kubermatic/resource_node_deployment.go
@@ -75,11 +75,11 @@ func readNodeDeploymentPreservedValues(d *schema.ResourceData) *nodeSpecPreserve
 }
 
 func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {
+	projectID, seedDC, clusterID, err := kubermaticClusterParseID(d.Get("cluster_id").(string))
 	if err != nil {
 		return err
 	}
-
 	k := m.(*kubermaticProviderMeta)
 
 	p := project.NewCreateNodeDeploymentParams()
@@ -91,10 +91,14 @@ func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {
 		Spec:    expandNodeDeploymentSpec(d.Get("spec").([]interface{})),
 	})
 
+	if err := waitClusterReady(k, d, projectID, seedDC, clusterID); err != nil {
+		return fmt.Errorf("cluster is not ready: %v", err)
+	}
+
 	r, err := k.client.Project.CreateNodeDeployment(p, k.auth)
 	if err != nil {
 		if e, ok := err.(*project.CreateNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
-			return fmt.Errorf("create node deployment: %s", errorMessage(e.Payload))
+			return fmt.Errorf("unable to create node deployment: %s", errorMessage(e.Payload))
 		}
 
 		return fmt.Errorf("unable to create a node deployment: %v", getErrorResponse(err))
@@ -106,6 +110,7 @@ func resourceNodeDeploymentCreate(d *schema.ResourceData, m interface{}) error {
 	}
 
 	return resourceNodeDeploymentRead(d, m)
+
 }
 
 func kubermaticNodeDeploymentMakeID(projectID, seedDC, clusterID, id string) string {
@@ -181,7 +186,7 @@ func resourceNodeDeploymentUpdate(d *schema.ResourceData, m interface{}) error {
 	r, err := k.client.Project.PatchNodeDeployment(p, k.auth)
 	if err != nil {
 		if e, ok := err.(*project.PatchNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
-			return fmt.Errorf(errorMessage(e.Payload))
+			return fmt.Errorf("unable to update a node deployment: %v", errorMessage(e.Payload))
 		}
 		return fmt.Errorf("unable to update a node deployment: %v", getErrorResponse(err))
 	}
@@ -194,7 +199,7 @@ func resourceNodeDeploymentUpdate(d *schema.ResourceData, m interface{}) error {
 }
 
 func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration, projectID, seedDC, clusterID, id string) error {
-	err := resource.Retry(timeout, func() *resource.RetryError {
+	return resource.Retry(timeout, func() *resource.RetryError {
 		p := project.NewGetNodeDeploymentParams()
 		p.SetProjectID(projectID)
 		p.SetClusterID(clusterID)
@@ -203,11 +208,7 @@ func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration,
 
 		r, err := k.client.Project.GetNodeDeployment(p, k.auth)
 		if err != nil {
-			if e, ok := err.(*project.GetNodeDeploymentDefault); ok && errorMessage(e.Payload) != "" {
-				// Sometimes api returns 500 which often means try later
-				return resource.RetryableError(fmt.Errorf("unable to get node deployment '%s' status: %s: %v", id, errorMessage(e.Payload), err))
-			}
-			return resource.NonRetryableError(fmt.Errorf("unable to get node deployment '%s' status: %v", id, getErrorResponse(err)))
+			return resource.RetryableError(fmt.Errorf("unable to get node deployment %v", err))
 		}
 
 		if r.Payload.Status.ReadyReplicas < *r.Payload.Spec.Replicas {
@@ -216,10 +217,6 @@ func waitForNodeDeploymentRead(k *kubermaticProviderMeta, timeout time.Duration,
 		}
 		return nil
 	})
-	if err != nil {
-		return fmt.Errorf("node deployment '%s' is not ready: %v", id, err)
-	}
-
-	return nil
 }
 
 func resourceNodeDeploymentDelete(d *schema.ResourceData, m interface{}) error {
diff --git a/kubermatic/resource_node_deployment_test.go b/kubermatic/resource_node_deployment_test.go
index 632b752..790c8dd 100644
--- a/kubermatic/resource_node_deployment_test.go
+++ b/kubermatic/resource_node_deployment_test.go
@@ -22,8 +22,8 @@ func TestAccKubermaticNodeDeployment_Openstack_Basic(t *testing.T) {
 	image := os.Getenv(testEnvOpenstackImage)
 	image2 := os.Getenv(testEnvOpenstackImage2)
 	flavor := os.Getenv(testEnvOpenstackFlavor)
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
-	kubeletVersion16 := os.Getenv(testEnvK8sVersion16)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
+	kubeletVersion16 := os.Getenv(testEnvK8sOlderVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForOpenstack(t) },
@@ -256,7 +256,7 @@ func TestAccKubermaticNodeDeployment_Azure_Basic(t *testing.T) {
 	subsID := os.Getenv(testEnvAzureSubscriptionID)
 	nodeDC := os.Getenv(testEnvAzureNodeDC)
 	nodeSize := os.Getenv(testEnvAzureNodeSize)
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForAzure(t) },
@@ -264,7 +264,7 @@ func TestAccKubermaticNodeDeployment_Azure_Basic(t *testing.T) {
 		CheckDestroy: testAccCheckKubermaticNodeDeploymentDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccCheckKubermaticNodeDeploymentAzureBasic(testName, clientID, clientSecret, tenantID, subsID, nodeDC, nodeSize, k8sVersion17, k8sVersion17),
+				Config: testAccCheckKubermaticNodeDeploymentAzureBasic(testName, clientID, clientSecret, tenantID, subsID, nodeDC, nodeSize, k8sVersion17),
 				Check: resource.ComposeAggregateTestCheckFunc(
 					testAccCheckKubermaticNodeDeploymentExists("kubermatic_node_deployment.acctest_nd", &nodedepl),
 					resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.azure.0.size", nodeSize),
@@ -274,7 +274,7 @@ func TestAccKubermaticNodeDeployment_Azure_Basic(t *testing.T) {
 	})
 }
 
-func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, tenantID, subscID, nodeDC, nodeSize, k8sVersion, kubeletVersion string) string {
+func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, tenantID, subscID, nodeDC, nodeSize, k8sVersion string) string {
 	return fmt.Sprintf(`
 resource "kubermatic_project" "acctest_project" {
 	name = "%s"
@@ -302,9 +302,8 @@ func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, t
 		cluster_id = kubermatic_cluster.acctest_cluster.id
 		name = "%s"
 		spec {
-			replicas = 2
+			replicas = 1
 			template {
-				dynamic_config = false
 				cloud {
 					azure {
 						size = "%s"
@@ -320,7 +319,7 @@ func testAccCheckKubermaticNodeDeploymentAzureBasic(n, clientID, clientSecret, t
 				}
 			}
 		}
-	}`, n, n, nodeDC, k8sVersion, clientID, clientSecret, tenantID, subscID, n, nodeSize, kubeletVersion)
+	}`, n, n, nodeDC, k8sVersion, clientID, clientSecret, tenantID, subscID, n, nodeSize, k8sVersion)
 }
 
 func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
@@ -335,7 +334,7 @@ func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
 	subnetID := os.Getenv(testEnvAWSSubnetID)
 	availabilityZone := os.Getenv(testEnvAWSAvailabilityZone)
 	diskSize := os.Getenv(testEnvAWSDiskSize)
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheckForAWS(t) },
@@ -348,7 +347,7 @@ func TestAccKubermaticNodeDeployment_AWS_Basic(t *testing.T) {
 					testAccCheckKubermaticNodeDeploymentExists("kubermatic_node_deployment.acctest_nd", &nodedepl),
 					resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.instance_type", instanceType),
 					resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.disk_size", diskSize),
"spec.0.template.0.cloud.0.aws.0.disk_size", diskSize), - resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.volume_type", "standart"), + resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.volume_type", "standard"), resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.subnet_id", subnetID), resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.availability_zone", availabilityZone), resource.TestCheckResourceAttr("kubermatic_node_deployment.acctest_nd", "spec.0.template.0.cloud.0.aws.0.assign_public_ip", "true"), diff --git a/kubermatic/resource_project.go b/kubermatic/resource_project.go index d1f8b04..b9d4463 100644 --- a/kubermatic/resource_project.go +++ b/kubermatic/resource_project.go @@ -3,6 +3,7 @@ package kubermatic import ( "fmt" "net/http" + "time" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -13,8 +14,10 @@ import ( ) const ( - projectActive = "Active" - projectInactive = "Inactive" + projectActive = "Active" + projectInactive = "Inactive" + usersReady = "Ready" + usersUnavailable = "Unavailable" ) func resourceProject() *schema.Resource { @@ -115,16 +118,16 @@ func resourceProjectCreate(d *schema.ResourceData, m interface{}) error { r, err := k.client.Project.GetProject(p.WithProjectID(id), k.auth) if err != nil { if e, ok := err.(*project.GetProjectDefault); ok && (e.Code() == http.StatusForbidden || e.Code() == http.StatusNotFound) { - return r, projectInactive, nil + return r, projectInactive, fmt.Errorf("project not ready: %v", err) } return nil, "", err } k.log.Debugf("creating project '%s', currently in '%s' state", id, r.Payload.Status) - return r, r.Payload.Status, nil + return r, projectActive, nil }, Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: retryTimeout, - Delay: requestDelay, + MinTimeout: 5 * retryTimeout, + Delay: 5 * requestDelay, } if _, err := createStateConf.WaitForState(); err != nil { @@ -321,17 +324,43 @@ func kubermaticProjectCurrentUser(k *kubermaticProviderMeta) (*models.User, erro } func kubermaticProjectPersistedUsers(k *kubermaticProviderMeta, id string) (map[string]models.User, error) { - p := users.NewGetUsersForProjectParams() - p.SetProjectID(id) - r, err := k.client.Users.GetUsersForProject(p, k.auth) - if err != nil { - return nil, fmt.Errorf("get users for project errored: %v", err) + listStateConf := &resource.StateChangeConf{ + Pending: []string{ + usersUnavailable, + }, + Target: []string{ + usersReady, + }, + Refresh: func() (interface{}, string, error) { + p := users.NewGetUsersForProjectParams() + p.SetProjectID(id) + + r, err := k.client.Users.GetUsersForProject(p, k.auth) + if err != nil { + // wait for the RBACs + if _, ok := err.(*users.GetUsersForProjectForbidden); ok { + return r, usersUnavailable, nil + } + return nil, usersUnavailable, fmt.Errorf("get users for project error: %v", err) + } + ret := make(map[string]models.User) + for _, p := range r.Payload { + ret[p.Email] = *p + } + return ret, usersReady, nil + }, + Timeout: 10 * time.Second, + Delay: 5 * requestDelay, } - ret := make(map[string]models.User) - for _, p := range r.Payload { - ret[p.Email] = *p + + rawUsers, err := listStateConf.WaitForState() + if err != nil { + k.log.Debugf("error while waiting for the users %v", err) + return nil, fmt.Errorf("error while waiting for 
the users %v", err) } - return ret, nil + users := rawUsers.(map[string]models.User) + + return users, nil } func kubermaticProjectConfiguredUsers(d *schema.ResourceData) map[string]models.User { diff --git a/kubermatic/resource_service_account.go b/kubermatic/resource_service_account.go index e831929..ed7b11a 100644 --- a/kubermatic/resource_service_account.go +++ b/kubermatic/resource_service_account.go @@ -3,13 +3,20 @@ package kubermatic import ( "fmt" "strings" + "time" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/kubermatic/go-kubermatic/client/serviceaccounts" "github.com/kubermatic/go-kubermatic/models" ) +const ( + serviceAccountReady = "Ready" + serviceAccountUnavailable = "Unavailable" +) + func resourceServiceAccount() *schema.Resource { return &schema.Resource{ Create: resourceServiceAccountCreate, @@ -118,16 +125,38 @@ func resourceServiceAccountRead(d *schema.ResourceData, m interface{}) error { } func kubermaticServiceAccountList(k *kubermaticProviderMeta, projectID string) ([]*models.ServiceAccount, error) { - p := serviceaccounts.NewListServiceAccountsParams() - p.SetProjectID(projectID) - r, err := k.client.Serviceaccounts.ListServiceAccounts(p, k.auth) + listStateConf := &resource.StateChangeConf{ + Pending: []string{ + serviceAccountUnavailable, + }, + Target: []string{ + serviceAccountReady, + }, + Refresh: func() (interface{}, string, error) { + p := serviceaccounts.NewListServiceAccountsParams() + p.SetProjectID(projectID) + s, err := k.client.Serviceaccounts.ListServiceAccounts(p, k.auth) + if err != nil { + // wait for the RBACs + if _, ok := err.(*serviceaccounts.ListServiceAccountsForbidden); ok { + return s, usersUnavailable, nil + } + return nil, serviceAccountUnavailable, fmt.Errorf("can not get service accounts: %v", err) + } + return s, serviceAccountReady, nil + }, + Timeout: 20 * time.Second, + Delay: requestDelay, + } + + s, err := listStateConf.WaitForState() if err != nil { - if e, ok := err.(*serviceaccounts.ListServiceAccountsDefault); ok && errorMessage(e.Payload) != "" { - return nil, fmt.Errorf("unable to get service account: %s", errorMessage(e.Payload)) - } - return nil, fmt.Errorf("unable to get service account: %v", err) + k.log.Debugf("error while waiting for the service account %v", err) + return nil, fmt.Errorf("error while waiting for the service account %v", err) } - return r.Payload, nil + sa := s.(*serviceaccounts.ListServiceAccountsOK) + + return sa.Payload, nil } func resourceServiceAccountUpdate(d *schema.ResourceData, m interface{}) error { diff --git a/kubermatic/resource_service_account_token.go b/kubermatic/resource_service_account_token.go index 26814ad..2e33c9c 100644 --- a/kubermatic/resource_service_account_token.go +++ b/kubermatic/resource_service_account_token.go @@ -3,12 +3,19 @@ package kubermatic import ( "fmt" "strings" + "time" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/kubermatic/go-kubermatic/client/tokens" "github.com/kubermatic/go-kubermatic/models" ) +const ( + serviceAccountTokenReady = "Ready" + serviceAccountTokenUnavailable = "Unavailable" +) + func resourceServiceAccountToken() *schema.Resource { return &schema.Resource{ Create: resourceServiceAccountTokenCreate, @@ -91,25 +98,43 @@ func resourceServiceAccountTokenCreate(d *schema.ResourceData, m interface{}) er func 
 func resourceServiceAccountTokenRead(d *schema.ResourceData, m interface{}) error {
 	k := m.(*kubermaticProviderMeta)
-
 	projectID, serviceAccountID, tokenID, err := kubermaticServiceAccountTokenParseID(d.Id())
 	if err != nil {
 		return err
 	}
 
+	listStateConf := &resource.StateChangeConf{
+		Pending: []string{
+			serviceAccountTokenUnavailable,
+		},
+		Target: []string{
+			serviceAccountTokenReady,
+		},
+		Refresh: func() (interface{}, string, error) {
+			p := tokens.NewListServiceAccountTokensParams()
+			p.SetProjectID(projectID)
+			p.SetServiceAccountID(serviceAccountID)
+			t, err := k.client.Tokens.ListServiceAccountTokens(p, k.auth)
+			if err != nil {
+				// wait for the RBACs
+				if _, ok := err.(*tokens.ListServiceAccountTokensForbidden); ok {
+					return t, serviceAccountTokenUnavailable, nil
+				}
+				return nil, serviceAccountTokenUnavailable, err
+			}
+			return t, serviceAccountTokenReady, nil
+		},
+		Timeout: 30 * time.Second,
+		Delay:   requestDelay,
+	}
-	p := tokens.NewListServiceAccountTokensParams()
-	p.SetProjectID(projectID)
-	p.SetServiceAccountID(serviceAccountID)
-	r, err := k.client.Tokens.ListServiceAccountTokens(p, k.auth)
+	s, err := listStateConf.WaitForState()
 	if err != nil {
-		if e, ok := err.(*tokens.ListServiceAccountTokensDefault); ok && errorMessage(e.Payload) != "" {
-			return fmt.Errorf("unable to get token: %s", errorMessage(e.Payload))
-		}
-		return fmt.Errorf("unable to get token: %v", err)
+		k.log.Debugf("error while waiting for the tokens: %v", err)
+		return fmt.Errorf("error while waiting for the tokens: %v", err)
 	}
-
+	saTokens := s.(*tokens.ListServiceAccountTokensOK)
 	var token *models.PublicServiceAccountToken
-	for _, v := range r.Payload {
+	for _, v := range saTokens.Payload {
 		if v.ID == tokenID {
 			token = v
 			break
diff --git a/kubermatic/resource_sshkey.go b/kubermatic/resource_sshkey.go
index 27ee252..59968a5 100644
--- a/kubermatic/resource_sshkey.go
+++ b/kubermatic/resource_sshkey.go
@@ -3,13 +3,20 @@ package kubermatic
 import (
 	"fmt"
 	"strings"
+	"time"
 
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 	"github.com/kubermatic/go-kubermatic/client/project"
 	"github.com/kubermatic/go-kubermatic/models"
 )
 
+const (
+	sshReady       = "Ready"
+	sshUnavailable = "Unavailable"
+)
+
 func resourceSSHKey() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceSSHKeyCreate,
@@ -65,14 +72,40 @@ func resourceSSHKeyCreate(d *schema.ResourceData, m interface{}) error {
 
 func resourceSSHKeyRead(d *schema.ResourceData, m interface{}) error {
 	k := m.(*kubermaticProviderMeta)
-	p := project.NewListSSHKeysParams()
-	p.SetProjectID(d.Get("project_id").(string))
-	ret, err := k.client.Project.ListSSHKeys(p, k.auth)
+
+	listStateConf := &resource.StateChangeConf{
+		Pending: []string{
+			sshUnavailable,
+		},
+		Target: []string{
+			sshReady,
+		},
+		Refresh: func() (interface{}, string, error) {
+			p := project.NewListSSHKeysParams()
+			p.SetProjectID(d.Get("project_id").(string))
+			k, err := k.client.Project.ListSSHKeys(p, k.auth)
+			if err != nil {
+				// wait for the RBACs
+				if _, ok := err.(*project.ListSSHKeysForbidden); ok {
+					return k, sshUnavailable, nil
+				}
+				return nil, sshUnavailable, fmt.Errorf("can not get ssh keys: %v", err)
+			}
+			return k, sshReady, nil
+		},
+		Timeout: 20 * time.Second,
+		Delay:   requestDelay,
+	}
+
+	s, err := listStateConf.WaitForState()
 	if err != nil {
-		return fmt.Errorf("unable to list SSH keys: %s", getErrorResponse(err))
+		k.log.Debugf("error while waiting for the SSH keys: %v", err)
+		return fmt.Errorf("error while waiting for the SSH keys: %v", err)
 	}
+	keys := s.(*project.ListSSHKeysOK)
+
 	var sshkey *models.SSHKey
-	for _, r := range ret.Payload {
+	for _, r := range keys.Payload {
 		if r.ID == d.Id() {
 			sshkey = r
 			break
diff --git a/kubermatic/validation_node_deployment_test.go b/kubermatic/validation_node_deployment_test.go
index 72724da..ea2d1ac 100644
--- a/kubermatic/validation_node_deployment_test.go
+++ b/kubermatic/validation_node_deployment_test.go
@@ -12,80 +12,140 @@ import (
 
 func TestAccKubermaticNodeDeployment_ValidationAgainstCluster(t *testing.T) {
 	testName := randomTestName()
-	username := os.Getenv(testEnvOpenstackUsername)
-	password := os.Getenv(testEnvOpenstackPassword)
-	tenant := os.Getenv(testEnvOpenstackTenant)
-	nodeDC := os.Getenv(testEnvOpenstackNodeDC)
-	image := os.Getenv(testEnvOpenstackImage)
-	flavor := os.Getenv(testEnvOpenstackFlavor)
+	accessKeyID := os.Getenv(testEnvAWSAccessKeyID)
+	accessKeySecret := os.Getenv(testAWSSecretAccessKey)
+	vpcID := os.Getenv(testEnvAWSVPCID)
+	nodeDC := os.Getenv(testEnvAWSNodeDC)
+	k8sVersion17 := os.Getenv(testEnvK8sVersion)
+	instanceType := os.Getenv(testEnvAWSInstanceType)
+	subnetID := os.Getenv(testEnvAWSSubnetID)
+	availabilityZone := os.Getenv(testEnvAWSAvailabilityZone)
+	diskSize := os.Getenv(testEnvAWSDiskSize)
 
-	k8sVersion17 := os.Getenv(testEnvK8sVersion17)
-	kubeletVersion16 := os.Getenv(testEnvK8sVersion16)
 	unavailableVersion := "1.12.1"
 	bigVersion := "3.0.0"
-	existingClusterID := os.Getenv(testEnvExistingClusterID)
-
-	azure := `
-	azure {
-		size = "2"
-	}`
-	openstack := fmt.Sprintf(`
-	openstack {
-		flavor = "%s"
-		image = "%s"
-	}`, flavor, image)
-
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
-			testAccPreCheckForOpenstack(t)
-			testAccPreCheckExistingCluster(t)
+			testAccPreCheckForAWS(t)
 		},
 		Providers:    testAccProviders,
-		CheckDestroy: testAccCheckKubermaticNodeDeploymentDestroy,
+		CheckDestroy: testAccCheckKubermaticClusterDestroy,
 		Steps: []resource.TestStep{
 			{
-				PlanOnly:           true,
-				Config:             testAccCheckKubermaticNodeDeploymentBasic(testName, nodeDC, username, password, tenant, k8sVersion17, kubeletVersion16, image, flavor),
-				ExpectNonEmptyPlan: true,
+				Config: testAccCheckKubermaticNodeDeploymentBasicValidation(testName, accessKeyID, accessKeySecret, vpcID, nodeDC, instanceType, subnetID, availabilityZone, diskSize, k8sVersion17, k8sVersion17),
 			},
 			{
-				PlanOnly:    true,
-				Config:      testAccCheckKubermaticNodeDeploymentBasicValidation(existingClusterID, testName, kubeletVersion16, azure),
-				ExpectError: regexp.MustCompile(`provider for node deployment must \(.*\) match cluster provider \(.*\)`),
+				Config:      testAccCheckKubermaticNodeDeploymentBasicValidation(testName, accessKeyID, accessKeySecret, vpcID, nodeDC, instanceType, subnetID, availabilityZone, diskSize, k8sVersion17, unavailableVersion),
+				ExpectError: regexp.MustCompile(fmt.Sprintf(`unknown version for node deployment %s, available versions`, unavailableVersion)),
 			},
 			{
-				PlanOnly:    true,
-				Config:      testAccCheckKubermaticNodeDeploymentBasicValidation(existingClusterID, testName, bigVersion, azure),
-				ExpectError: regexp.MustCompile(`cannot be greater than cluster version`),
+				Config:      testAccCheckKubermaticNodeDeploymentTypeValidation(testName, accessKeyID, accessKeySecret, vpcID, nodeDC, k8sVersion17, k8sVersion17),
+				ExpectError: regexp.MustCompile(`provider for node deployment must \(.*\) match cluster provider \(.*\)`),
 			},
 			{
-				PlanOnly:    true,
-				Config:      testAccCheckKubermaticNodeDeploymentBasicValidation(existingClusterID, testName, unavailableVersion, openstack),
-				ExpectError: regexp.MustCompile(fmt.Sprintf(`unknown version for node deployment %s, available versions`, unavailableVersion)),
+				Config:      testAccCheckKubermaticNodeDeploymentBasicValidation(testName, accessKeyID, accessKeySecret, vpcID, nodeDC, instanceType, subnetID, availabilityZone, diskSize, k8sVersion17, bigVersion),
+				ExpectError: regexp.MustCompile(`cannot be greater than cluster version`),
 			},
 		},
 	})
 }
 
-func testAccCheckKubermaticNodeDeploymentBasicValidation(clusterID, testName, kubeletVersion, provider string) string {
+func testAccCheckKubermaticNodeDeploymentBasicValidation(n, keyID, keySecret, vpcID, nodeDC, instanceType, subnetID, availabilityZone, diskSize, k8sVersion, kubeletVersion string) string {
 	return fmt.Sprintf(`
+	resource "kubermatic_project" "acctest_project" {
+		name = "%s"
+	}
+
+	resource "kubermatic_cluster" "acctest_cluster" {
+		name = "%s"
+		dc_name = "%s"
+		project_id = kubermatic_project.acctest_project.id
+
+		spec {
+			version = "%s"
+			cloud {
+				aws {
+					access_key_id = "%s"
+					secret_access_key = "%s"
+					vpc_id = "%s"
+				}
+			}
+		}
+	}
+
+	resource "kubermatic_node_deployment" "acctest_nd" {
+		cluster_id = kubermatic_cluster.acctest_cluster.id
+		name = "%s"
+		spec {
+			replicas = 1
+			template {
+				cloud {
+					aws {
+						instance_type = "%s"
+						disk_size = "%s"
+						volume_type = "standard"
+						subnet_id = "%s"
+						availability_zone = "%s"
+						assign_public_ip = true
+					}
+				}
+				operating_system {
+					ubuntu {
+						dist_upgrade_on_boot = false
+					}
+				}
+				versions {
+					kubelet = "%s"
+				}
+			}
+		}
+	}`, n, n, nodeDC, k8sVersion, keyID, keySecret, vpcID, n, instanceType, diskSize, subnetID, availabilityZone, kubeletVersion)
+}
+
+func testAccCheckKubermaticNodeDeploymentTypeValidation(n, keyID, keySecret, vpcID, nodeDC, k8sVersion, kubeletVersion string) string {
+	return fmt.Sprintf(`
+	resource "kubermatic_project" "acctest_project" {
+		name = "%s"
+	}
+
+	resource "kubermatic_cluster" "acctest_cluster" {
+		name = "%s"
+		dc_name = "%s"
+		project_id = kubermatic_project.acctest_project.id
+
+		spec {
+			version = "%s"
+			cloud {
+				aws {
+					access_key_id = "%s"
+					secret_access_key = "%s"
+					vpc_id = "%s"
+				}
+			}
+		}
+	}
+
 	resource "kubermatic_node_deployment" "acctest_nd" {
-		cluster_id = "%s"
+		cluster_id = kubermatic_cluster.acctest_cluster.id
 		name = "%s"
 		spec {
 			replicas = 1
 			template {
 				cloud {
-					%s
+					azure {
+						size = 2
+					}
 				}
 				operating_system {
-					ubuntu {}
+					ubuntu {
+						dist_upgrade_on_boot = false
+					}
 				}
 				versions {
 					kubelet = "%s"
 				}
 			}
 		}
-	}`, clusterID, testName, provider, kubeletVersion)
+	}`, n, n, nodeDC, k8sVersion, keyID, keySecret, vpcID, n, kubeletVersion)
 }