From 3e1a6f0a7fc7b083e19fa7cfab6952d3ad343ae5 Mon Sep 17 00:00:00 2001 From: erda-bot <81558540+erda-bot@users.noreply.github.com> Date: Fri, 20 Aug 2021 17:57:30 +0800 Subject: [PATCH] feature: cmp import cluster use single chart (#1516) (#1519) Co-authored-by: Asher Liu --- ...le-cluster-init => Dockerfile-cluster-ops} | 0 cmd/{cluster-init => cluster-ops}/main.go | 4 +- conf/cluster-init/cluster-init.yaml | 1 - conf/cluster-ops/cluster-ops.yaml | 1 + modules/cluster-init/config/config.go | 32 -- .../client/client.go | 58 +-- modules/cluster-ops/config/config.go | 31 ++ .../{cluster-init => cluster-ops}/provider.go | 10 +- modules/cmp/conf/conf.go | 62 ++- modules/cmp/endpoints/cluster.go | 15 - modules/cmp/endpoints/endpoints.go | 1 - modules/cmp/impl/clusters/cluster_hook.go | 63 --- modules/cmp/impl/clusters/clusterinfo.go | 88 ++-- modules/cmp/impl/clusters/import_cluster.go | 475 ++++++++---------- modules/cmp/impl/clusters/proxy-deploy.go | 34 +- modules/cmp/impl/clusters/update_cluster.go | 5 +- .../cmp/impl/clusters/upgrade_edge_cluster.go | 4 +- modules/cmp/initialize.go | 16 - modules/cmp/services/kubernetes/kubernetes.go | 110 ---- 19 files changed, 361 insertions(+), 649 deletions(-) rename build/dockerfiles/{Dockerfile-cluster-init => Dockerfile-cluster-ops} (100%) rename cmd/{cluster-init => cluster-ops}/main.go (88%) delete mode 100644 conf/cluster-init/cluster-init.yaml create mode 100644 conf/cluster-ops/cluster-ops.yaml delete mode 100644 modules/cluster-init/config/config.go rename modules/{cluster-init => cluster-ops}/client/client.go (75%) create mode 100644 modules/cluster-ops/config/config.go rename modules/{cluster-init => cluster-ops}/provider.go (83%) delete mode 100644 modules/cmp/impl/clusters/cluster_hook.go delete mode 100644 modules/cmp/services/kubernetes/kubernetes.go diff --git a/build/dockerfiles/Dockerfile-cluster-init b/build/dockerfiles/Dockerfile-cluster-ops similarity index 100% rename from build/dockerfiles/Dockerfile-cluster-init rename to build/dockerfiles/Dockerfile-cluster-ops diff --git a/cmd/cluster-init/main.go b/cmd/cluster-ops/main.go similarity index 88% rename from cmd/cluster-init/main.go rename to cmd/cluster-ops/main.go index 29f9a921b98..63394a4ae79 100644 --- a/cmd/cluster-init/main.go +++ b/cmd/cluster-ops/main.go @@ -18,11 +18,11 @@ import ( "github.com/erda-project/erda/pkg/common" _ "github.com/erda-project/erda-infra/providers" - _ "github.com/erda-project/erda/modules/cluster-init" + _ "github.com/erda-project/erda/modules/cluster-ops" ) func main() { common.Run(&servicehub.RunOptions{ - ConfigFile: "conf/cluster-init/cluster-init.yaml", + ConfigFile: "conf/cluster-ops/cluster-ops.yaml", }) } diff --git a/conf/cluster-init/cluster-init.yaml b/conf/cluster-init/cluster-init.yaml deleted file mode 100644 index 059841a29b5..00000000000 --- a/conf/cluster-init/cluster-init.yaml +++ /dev/null @@ -1 +0,0 @@ -cluster-init: diff --git a/conf/cluster-ops/cluster-ops.yaml b/conf/cluster-ops/cluster-ops.yaml new file mode 100644 index 00000000000..5a8e2fa3e8d --- /dev/null +++ b/conf/cluster-ops/cluster-ops.yaml @@ -0,0 +1 @@ +cluster-ops: diff --git a/modules/cluster-init/config/config.go b/modules/cluster-init/config/config.go deleted file mode 100644 index 5e4e0b253ff..00000000000 --- a/modules/cluster-init/config/config.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2021 Terminus, Inc. 
-// -// This program is free software: you can use, redistribute, and/or modify -// it under the terms of the GNU Affero General Public License, version 3 -// or later ("AGPL"), as published by the Free Software Foundation. -// -// This program is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package config - -type Config struct { - Debug bool `env:"DEBUG" default:"false" desc:"enable debug logging"` - RepoURL string `env:"HELM_REPO_URL" desc:"helm repo url"` - Reinstall bool `env:"REINSTALL" desc:"reinstall erda comp"` - Version string `env:"ERDA_CHART_VERSION" desc:"erda chart version"` - InstallMode string `env:"INSTALL_MODE" desc:"install mode, remote or local"` - RepoMode string `env:"REPO_MODE" desc:"get chart mode, download or use build in charts"` - TargetCluster string `env:"TARGET_CLUSTER" desc:"special when CREDENTIAL_FROM=CLUSTER_MANAGER"` - NodeLabels string `env:"NODE_LABELS" desc:"node labels after install erda"` - ChartErdaBaseValues string `env:"CHART_ERDA_BASE_VALUES" desc:"provide erda base values"` - ChartErdaAddonsValues string `env:"CHART_ERDA_ADDONS_VALUES" desc:"provide erda addons values"` - ChartErdaValues string `env:"CHART_ERDA_VALUES" desc:"provide erda values"` - // HELM_NAMESPACE: helm deploy namespace - // HELM_REPO_URL: helm repo address - // HELM_REPOSITORY_CONFIG: helm repository store path - // HELM_REPOSITORY_CACHE: helm charts cache path -} diff --git a/modules/cluster-init/client/client.go b/modules/cluster-ops/client/client.go similarity index 75% rename from modules/cluster-init/client/client.go rename to modules/cluster-ops/client/client.go index d1e19496c95..a73ee1882d2 100644 --- a/modules/cluster-init/client/client.go +++ b/modules/cluster-ops/client/client.go @@ -25,7 +25,7 @@ import ( "k8s.io/client-go/rest" "github.com/erda-project/erda/bundle" - "github.com/erda-project/erda/modules/cluster-init/config" + "github.com/erda-project/erda/modules/cluster-ops/config" erdahelm "github.com/erda-project/erda/pkg/helm" kc "github.com/erda-project/erda/pkg/k8sclient/config" ) @@ -33,11 +33,6 @@ import ( const ( defaultRepoName = "stable" InstallModeRemote = "REMOTE" - RepoModeRemote = "REMOTE" - RepoModeLocal = "LOCAL" - LocalRepoPath = "/app/charts" - ErdaBaseCharts = "erda-base" - ErdaAddonsCharts = "erda-addons" ErdaCharts = "erda" ) @@ -65,7 +60,7 @@ func WithConfig(cfg *config.Config) Option { func (c *Client) Execute() error { logrus.Debugf("load config: %+v", c.config) - opts, err := c.newHelmClientOptions() + opts, err := c.genHelmClientOptions() if err != nil { return fmt.Errorf("get helm client error: %v", err) } @@ -75,14 +70,16 @@ func (c *Client) Execute() error { return err } - switch strings.ToUpper(c.config.RepoMode) { - case RepoModeRemote: - // TODO: support repo auth info. - e := &repo.Entry{Name: defaultRepoName, URL: c.config.RepoURL} + // TODO: support repo auth info. 
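+	// The basic-auth fields below are read from HELM_REPO_USERNAME /
+	// HELM_REPO_PASSWORD (see modules/cluster-ops/config) and stay empty
+	// for anonymous chart repos.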
+ e := &repo.Entry{ + Name: defaultRepoName, + URL: c.config.RepoURL, + Username: c.config.RepoUsername, + Password: c.config.RepoPassword, + } - if err = hc.AddOrUpdateRepo(e); err != nil { - return err - } + if err = hc.AddOrUpdateRepo(e); err != nil { + return err } if c.config.Reinstall { @@ -96,7 +93,8 @@ func (c *Client) Execute() error { LocalRepoName: defaultRepoName, } - if err := m.Execute(); err != nil { + if err = m.Execute(); err != nil { + logrus.Errorf("execute uninstall error: %v", err) return err } } @@ -107,13 +105,14 @@ func (c *Client) Execute() error { LocalRepoName: defaultRepoName, } - if err := m.Execute(); err != nil { + if err = m.Execute(); err != nil { + logrus.Errorf("execute error: %v", err) return err } // Label node only local mode // TODO: support label remote with rest.config - if strings.ToUpper(c.config.RepoMode) != InstallModeRemote { + if strings.ToUpper(c.config.InstallMode) != InstallModeRemote { rc, err := rest.InClusterConfig() if err != nil { logrus.Errorf("get incluster rest config error: %v", err) @@ -153,8 +152,8 @@ func (c *Client) Execute() error { return nil } -// newHelmClientOptions create helm client options -func (c *Client) newHelmClientOptions() ([]erdahelm.Option, error) { +// genHelmClientOptions create helm client options +func (c *Client) genHelmClientOptions() ([]erdahelm.Option, error) { opts := make([]erdahelm.Option, 0) switch strings.ToUpper(c.config.InstallMode) { @@ -173,36 +172,17 @@ func (c *Client) newHelmClientOptions() ([]erdahelm.Option, error) { opts = append(opts, erdahelm.WithRESTClientGetter(erdahelm.NewRESTClientGetterImpl(rc))) } - switch strings.ToUpper(c.config.RepoMode) { - case RepoModeLocal: - opts = append(opts, erdahelm.WithLocalChartDiscoverDir(LocalRepoPath)) - } - return opts, nil } func (c *Client) getInitCharts() []*erdahelm.ChartSpec { return []*erdahelm.ChartSpec{ - { - ReleaseName: ErdaBaseCharts, - ChartName: ErdaBaseCharts, - Version: c.config.Version, - Action: erdahelm.ActionInstall, - Values: c.config.ChartErdaBaseValues, - }, - { - ReleaseName: ErdaAddonsCharts, - ChartName: ErdaAddonsCharts, - Version: c.config.Version, - Action: erdahelm.ActionInstall, - Values: c.config.ChartErdaAddonsValues, - }, { ReleaseName: ErdaCharts, ChartName: ErdaCharts, Version: c.config.Version, Action: erdahelm.ActionInstall, - Values: c.config.ChartErdaValues, + Values: c.config.SetValues, }, } } diff --git a/modules/cluster-ops/config/config.go b/modules/cluster-ops/config/config.go new file mode 100644 index 00000000000..2828c59dd00 --- /dev/null +++ b/modules/cluster-ops/config/config.go @@ -0,0 +1,31 @@ +// Copyright (c) 2021 Terminus, Inc. +// +// This program is free software: you can use, redistribute, and/or modify +// it under the terms of the GNU Affero General Public License, version 3 +// or later ("AGPL"), as published by the Free Software Foundation. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+
+package config
+
+type Config struct {
+	Debug         bool   `env:"DEBUG" default:"false" desc:"enable debug logging"`
+	RepoURL       string `env:"HELM_REPO_URL" desc:"helm repo url"`
+	RepoUsername  string `env:"HELM_REPO_USERNAME" desc:"helm repo username"`
+	RepoPassword  string `env:"HELM_REPO_PASSWORD" desc:"helm repo password"`
+	Reinstall     bool   `env:"REINSTALL" default:"false" desc:"reinstall erda components"`
+	Version       string `env:"ERDA_CHART_VERSION" desc:"erda chart version"`
+	SetValues     string `env:"ERDA_CHART_VALUES" desc:"provide erda values"`
+	InstallMode   string `env:"INSTALL_MODE" default:"local" desc:"install mode, remote or local"`
+	TargetCluster string `env:"TARGET_CLUSTER" desc:"specified when CREDENTIAL_FROM=CLUSTER_MANAGER"`
+	NodeLabels    string `env:"NODE_LABELS" desc:"node labels to apply after installing erda"`
+	// HELM_NAMESPACE: helm deploy namespace
+	// HELM_REPO_URL: helm repo address
+	// HELM_REPOSITORY_CONFIG: helm repository store path
+	// HELM_REPOSITORY_CACHE: helm charts cache path
+}
diff --git a/modules/cluster-init/provider.go b/modules/cluster-ops/provider.go
similarity index 83%
rename from modules/cluster-init/provider.go
rename to modules/cluster-ops/provider.go
index 554b67d7abf..4730226f449 100644
--- a/modules/cluster-init/provider.go
+++ b/modules/cluster-ops/provider.go
@@ -20,8 +20,8 @@ import (
 	"github.com/sirupsen/logrus"
 
 	"github.com/erda-project/erda-infra/base/servicehub"
-	"github.com/erda-project/erda/modules/cluster-init/client"
-	"github.com/erda-project/erda/modules/cluster-init/config"
+	"github.com/erda-project/erda/modules/cluster-ops/client"
+	"github.com/erda-project/erda/modules/cluster-ops/config"
 )
 
 type provider struct {
@@ -43,9 +43,9 @@ func (p *provider) Run(ctx context.Context) error {
 }
 
 func init() {
-	servicehub.Register("cluster-init", &servicehub.Spec{
-		Services:    []string{"cluster-init"},
-		Description: "cluster init",
+	servicehub.Register("cluster-ops", &servicehub.Spec{
+		Services:    []string{"cluster-ops"},
+		Description: "cluster ops",
 		ConfigFunc: func() interface{} {
 			return &config.Config{}
 		},
diff --git a/modules/cmp/conf/conf.go b/modules/cmp/conf/conf.go
index 2e0c6c6a1bc..85d4d186af0 100644
--- a/modules/cmp/conf/conf.go
+++ b/modules/cmp/conf/conf.go
@@ -41,10 +41,18 @@ type Conf struct {
 	OryKratosAddr        string `default:"kratos:4433" env:"KRATOS_ADDR"`
 	OryKratosPrivateAddr string `default:"kratos:4434" env:"KRATOS_PRIVATE_ADDR"`
 
-	ErdaNamespace        string `default:"erda-system" env:"ERDA_NAMESPACE"`
-	ErdaHelmChartVersion string `default:"0.1.0" env:"ERDA_HELM_CHART_VERSION"`
-	ReleaseRepo          string `default:"registry.erda.cloud/erda" env:"RELEASE_REPO"`
-	DialerPublicAddr     string `env:"CLUSTER_DIALER_PUBLIC_ADDR"`
+	ReleaseRegistry    string `env:"RELEASE_REGISTRY" default:"registry.erda.cloud/erda"`
+	ClusterInitVersion string `env:"CLUSTER_INIT_VERSION" default:"0.1"`
+
+	HelmChartRepoURL      string `env:"HELM_REPO_URL"`
+	HelmChartRepoUserName string `env:"HELM_REPO_USERNAME"`
+	HelmChartRepoPassword string `env:"HELM_REPO_PASSWORD"`
+
+	ErdaNamespace   string `env:"DICE_NAMESPACE" default:"erda-system"`
+	ErdaVersion     string `env:"DICE_VERSION"`
+	ErdaProtocol    string `env:"DICE_PROTOCOL"`
+	ErdaClusterName string `env:"DICE_CLUSTER_NAME"`
+	ErdaDomain      string `env:"DICE_ROOT_DOMAIN"`
 }
 
 var cfg Conf
@@ -59,16 +67,6 @@ func ListenAddr() string {
 	return cfg.ListenAddr
 }
 
-// SoldierAddr return the address of soldier.
-func SoldierAddr() string {
-	return cfg.SoldierAddr
-}
-
-// SchedulerAddr Return the address of scheduler.
-func SchedulerAddr() string { - return cfg.SchedulerAddr -} - // Debug Return the switch of debug. func Debug() bool { return cfg.Debug @@ -136,18 +134,42 @@ func OryCompatibleClientSecret() string { return "" } +func ReleaseRegistry() string { + return cfg.ReleaseRegistry +} + +func ClusterInitVersion() string { + return cfg.ClusterInitVersion +} + +func HelmRepoURL() string { + return cfg.HelmChartRepoURL +} + +func HelmRepoUsername() string { + return cfg.HelmChartRepoUserName +} + +func HelmRepoPassword() string { + return cfg.HelmChartRepoPassword +} + func ErdaNamespace() string { return cfg.ErdaNamespace } -func ErdaHelmChartVersion() string { - return cfg.ErdaHelmChartVersion +func ErdaVersion() string { + return cfg.ErdaVersion +} + +func ErdaProtocol() string { + return cfg.ErdaProtocol } -func ReleaseRepo() string { - return cfg.ReleaseRepo +func ErdaClusterName() string { + return cfg.ErdaClusterName } -func DialerPublicAddr() string { - return cfg.DialerPublicAddr +func ErdaDomain() string { + return cfg.ErdaDomain } diff --git a/modules/cmp/endpoints/cluster.go b/modules/cmp/endpoints/cluster.go index dfa28ed45d2..24f4e421ae0 100644 --- a/modules/cmp/endpoints/cluster.go +++ b/modules/cmp/endpoints/cluster.go @@ -535,18 +535,3 @@ func (e *Endpoints) InitCluster(ctx context.Context, w http.ResponseWriter, r *h return nil } - -func (e *Endpoints) ClusterHook(ctx context.Context, r *http.Request, vars map[string]string) (resp httpserver.Responser, err error) { - req := apistructs.ClusterEvent{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - errstr := fmt.Sprintf("decode clusterhook request fail: %v", err) - logrus.Error(errstr) - return httpserver.HTTPResponse{Status: http.StatusBadRequest, Content: errstr}, nil - } - if err := e.clusters.Hook(&req); err != nil { - errstr := fmt.Sprintf("failed to handle cluster event: %v", err) - logrus.Error(errstr) - return httpserver.HTTPResponse{Status: http.StatusInternalServerError, Content: errstr}, nil - } - return httpserver.HTTPResponse{Status: http.StatusOK}, nil -} diff --git a/modules/cmp/endpoints/endpoints.go b/modules/cmp/endpoints/endpoints.go index 964278468ed..b12e28b8836 100644 --- a/modules/cmp/endpoints/endpoints.go +++ b/modules/cmp/endpoints/endpoints.go @@ -104,7 +104,6 @@ func (e *Endpoints) Routes() []httpserver.Endpoint { {Path: "/api/cluster", Method: http.MethodGet, Handler: auth(i18nPrinter(e.ClusterInfo))}, {Path: "/api/cluster/init-command", Method: http.MethodGet, WriterHandler: e.InitCluster}, {Path: "/api/org-cluster-info", Method: http.MethodGet, Handler: auth(i18nPrinter(e.OrgClusterInfo))}, - {Path: "/api/clusterhook", Method: http.MethodPost, Handler: auth(i18nPrinter(e.ClusterHook))}, // officer apis {Path: "/api/clusters/{clusterName}/registry/readonly", Method: http.MethodGet, Handler: e.RegistryReadonly}, diff --git a/modules/cmp/impl/clusters/cluster_hook.go b/modules/cmp/impl/clusters/cluster_hook.go deleted file mode 100644 index 445e46a4408..00000000000 --- a/modules/cmp/impl/clusters/cluster_hook.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2021 Terminus, Inc. -// -// This program is free software: you can use, redistribute, and/or modify -// it under the terms of the GNU Affero General Public License, version 3 -// or later ("AGPL"), as published by the Free Software Foundation. 
-// -// This program is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package clusters - -import ( - "fmt" - - "github.com/sirupsen/logrus" - - "github.com/erda-project/erda/apistructs" -) - -// Hook deal k8sClient object in mem -func (c *Clusters) Hook(clusterEvent *apistructs.ClusterEvent) error { - if clusterEvent == nil { - return fmt.Errorf("nil clusterEvent object") - } - - clusterName := clusterEvent.Content.Name - - switch clusterEvent.Action { - case apistructs.ClusterActionCreate: - logrus.Debugf("cluster %s action before create, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - if err := c.k8s.CreateClient(clusterEvent.Content.Name); err != nil { - logrus.Errorf("cluster %s action create error: %v", clusterName, err) - return err - } - logrus.Debugf("cluster %s action after create, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - return nil - case apistructs.ClusterActionUpdate: - logrus.Debugf("cluster %s action before update, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - if err := c.k8s.UpdateClient(clusterEvent.Content.Name); err != nil { - logrus.Errorf("cluster %s action update error: %v", clusterName, err) - return err - } - logrus.Debugf("cluster %s action after update, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - return nil - case apistructs.ClusterActionDelete: - logrus.Debugf("cluster %s action before delete, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - c.k8s.RemoveClient(clusterEvent.Content.Name) - logrus.Debugf("cluster %s action after delete, current clients map: %v", - clusterName, c.k8s.GetCacheClients()) - return nil - default: - return fmt.Errorf("invaild cluster event action: %s", clusterEvent.Action) - } -} diff --git a/modules/cmp/impl/clusters/clusterinfo.go b/modules/cmp/impl/clusters/clusterinfo.go index b36d65af64e..867a0e2534c 100644 --- a/modules/cmp/impl/clusters/clusterinfo.go +++ b/modules/cmp/impl/clusters/clusterinfo.go @@ -18,14 +18,14 @@ import ( "encoding/json" "fmt" "os" - "strings" "github.com/sirupsen/logrus" "golang.org/x/text/message" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "github.com/erda-project/erda/apistructs" + "github.com/erda-project/erda/modules/cmp/conf" + "github.com/erda-project/erda/pkg/k8sclient" "github.com/erda-project/erda/pkg/strutil" ) @@ -36,12 +36,12 @@ const ( statusInitializing = "initializing" statusInitializeError = "initialize error" statusUnknown = "unknown" - diceOperator = "/apis/dice.terminus.io/v1beta1/namespaces/default/dices/dice" - erdaOperator = "/apis/erda.terminus.io/v1beta1/namespaces/default/erdas/erda" ) var ( - checkCRDs = []string{erdaOperator, diceOperator} + diceOperator = "/apis/dice.terminus.io/v1beta1/namespaces/%s/dices/dice" + erdaOperator = "/apis/erda.terminus.io/v1beta1/namespaces/%s/erdas/erda" + checkCRDs = []string{erdaOperator, diceOperator} ) func (c *Clusters) ClusterInfo(ctx context.Context, orgID uint64, clusterNames []string) ([]map[string]map[string]apistructs.NameValue, error) { @@ -89,29 +89,8 @@ func (c *Clusters) ClusterInfo(ctx context.Context, orgID uint64, clusterNames [ urlInfo["nexus"] = apistructs.NameValue{Name: "nexus", Value: ci.Get(apistructs.NEXUS_ADDR)} } - cs, err := 
c.k8s.GetInClusterClient() + kc, err := k8sclient.NewWithTimeOut(clusterName, getClusterTimeout) if err != nil { - logrus.Error(err) - } else { - pod, err := cs.CoreV1().Pods(getPlatformNamespace()).List(context.Background(), metav1.ListOptions{ - LabelSelector: labels.Set(map[string]string{"job-name": generateInitJobName(orgID, clusterName)}).String(), - }) - if err == nil { - if len(pod.Items) > 0 { - containerInfoPart := strings.Split(pod.Items[0].Status.ContainerStatuses[0].ContainerID, "://") - if len(containerInfoPart) >= 2 { - baseInfo["clusterInitContainerID"] = apistructs.NameValue{ - Name: i18n.Sprintf("cluster init container id"), - Value: containerInfoPart[1], - } - } - } - } else { - logrus.Error(err) - } - } - - if kc, err := c.k8s.GetClient(clusterName); err != nil { logrus.Errorf("get k8sclient error: %v", err) result := map[string]map[string]apistructs.NameValue{ "basic": baseInfo, @@ -120,15 +99,15 @@ func (c *Clusters) ClusterInfo(ctx context.Context, orgID uint64, clusterNames [ resultList = append(resultList, result) continue - } else { - nodes, err := kc.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - logrus.Error(err) - } - baseInfo["nodeCount"] = apistructs.NameValue{Name: i18n.Sprintf("node count"), Value: len(nodes.Items)} } - if status, err := c.getClusterStatus(clusterMetaData); err != nil { + nodes, err := kc.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + if err != nil { + logrus.Error(err) + } + baseInfo["nodeCount"] = apistructs.NameValue{Name: i18n.Sprintf("node count"), Value: len(nodes.Items)} + + if status, err := c.getClusterStatus(kc, clusterMetaData); err != nil { logrus.Errorf("get cluster status error: %v", err) } else { baseInfo["clusterStatus"] = apistructs.NameValue{Name: i18n.Sprintf("cluster status"), Value: status} @@ -144,10 +123,14 @@ func (c *Clusters) ClusterInfo(ctx context.Context, orgID uint64, clusterNames [ return resultList, nil } -func (c *Clusters) getClusterStatus(meta *apistructs.ClusterInfo) (string, error) { +func (c *Clusters) getClusterStatus(kc *k8sclient.K8sClient, meta *apistructs.ClusterInfo) (string, error) { + if kc == nil || kc.ClientSet == nil { + return "", fmt.Errorf("kubernetes client is nil") + } + // if manage config is nil, cluster import with inet or other if meta.ManageConfig == nil { - return statusUnknown, nil + return statusOffline, nil } switch meta.ManageConfig.Type { @@ -165,29 +148,35 @@ func (c *Clusters) getClusterStatus(meta *apistructs.ClusterInfo) (string, error } } - client, err := c.k8s.GetClient(meta.Name) - if err != nil { - return statusUnknown, err - } - ec := &apistructs.DiceCluster{} - var res []byte + + var ( + res []byte + err error + resourceExist bool + ) for _, selfLink := range checkCRDs { - res, err = client.ClientSet.RESTClient().Get(). - AbsPath(selfLink). + res, err = kc.ClientSet.RESTClient().Get(). + AbsPath(fmt.Sprintf(selfLink, conf.ErdaNamespace())). 
 			DoRaw(context.Background())
 		if err != nil {
 			logrus.Error(err)
 			continue
 		}
+		resourceExist = true
 		break
 	}
 
-	if err = json.Unmarshal(res, &ec); err != nil {
+	if !resourceExist {
 		return statusUnknown, nil
 	}
 
+	if err = json.Unmarshal(res, &ec); err != nil {
+		logrus.Errorf("unmarshal data error, data: %v", string(res))
+		return statusUnknown, err
+	}
+
 	switch ec.Status.Phase {
 	case apistructs.ClusterPhaseRunning:
 		return statusOnline, nil
@@ -215,12 +204,3 @@ func parseManageType(mc *apistructs.ManageConfig) string {
 		return "create"
 	}
 }
-
-func getPlatformNamespace() string {
-	diceNs := os.Getenv("DICE_NAMESPACE")
-	if diceNs == "" {
-		diceNs = metav1.NamespaceDefault
-	}
-
-	return diceNs
-}
diff --git a/modules/cmp/impl/clusters/import_cluster.go b/modules/cmp/impl/clusters/import_cluster.go
index c666b33dc45..4bc7dc0ab69 100644
--- a/modules/cmp/impl/clusters/import_cluster.go
+++ b/modules/cmp/impl/clusters/import_cluster.go
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"math/rand"
 	"net/http"
-	"os"
 	"strconv"
 	"strings"
 	"text/template"
@@ -39,45 +38,35 @@ import (
 	"github.com/erda-project/erda/apistructs"
 	"github.com/erda-project/erda/modules/cmp/conf"
 	"github.com/erda-project/erda/modules/cmp/dbclient"
-	"github.com/erda-project/erda/pkg/discover"
 	"github.com/erda-project/erda/pkg/http/httputil"
+	"github.com/erda-project/erda/pkg/k8sclient"
 )
 
 const (
-	KubeconfigType     = "KUBECONFIG"
-	SAType             = "SERVICEACCOUNT"
-	ProxyType          = "PROXY"
-	caKey              = "ca.crt"
-	tokenKey           = "token"
-	ModuleClusterInit  = "cluster-init"
-	ModuleClusterAgent = "cluster-agent"
-	ClusterAgentSA     = ModuleClusterAgent
-	ClusterAgentCR     = "cluster-agent-cr"
-	ClusterAgentCRB    = "cluster-agent-crb"
+	KubeconfigType   = "KUBECONFIG"
+	SAType           = "SERVICEACCOUNT"
+	ProxyType        = "PROXY"
+	caKey            = "ca.crt"
+	tokenKey         = "token"
+	ModuleClusterOps = "cluster-ops"
+	ClusterAgentSA   = "cluster-agent"
+	ClusterAgentCR   = "cluster-agent-cr"
+	ClusterAgentCRB  = "cluster-agent-crb"
 )
 
 var (
-	initRetryTimeout = 30 * time.Second
+	initRetryTimeout  = 30 * time.Second
+	getClusterTimeout = 2 * time.Second
 )
 
 type RenderDeploy struct {
-	ClusterName           string
-	MasterClusterDomain   string // Master cluster domain, collector or openapi public
-	MasterClusterProtocol string
-	PlateFormVersion      string
-	CustomDomain          string // Target cluster custom domain
-	InitJobImage          string
-	ClusterAgentImage     string
-	ErdaHelmChartVersion  string
-	DialerPublicAddr      string
-	ErdaSystem            string
-	OrgName               string
+	ErdaNamespace string
+	JobImage      string
+	Envs          []corev1.EnvVar
 }
 
 // importCluster import cluster
 func (c *Clusters) importCluster(userID string, req *apistructs.ImportCluster) error {
-	var err error
-
 	mc, err := ParseManageConfigFromCredential(req.CredentialType, req.Credential)
 	if err != nil {
 		return err
 	}
@@ -110,143 +99,45 @@ func (c *Clusters) importCluster(userID string, req *apistructs.ImportCluster) e
 		return err
 	}
 
-	status, err := c.getClusterStatus(ci)
+	kc, err := k8sclient.NewWithTimeOut(req.ClusterName, getClusterTimeout)
 	if err != nil {
+		logrus.Errorf("get kubernetes client error, clusterName: [%s]", req.ClusterName)
 		return err
 	}
 
-	if !(status == statusOffline || status == statusUnknown) {
-		return nil
-	}
-
-	cs, err := c.k8s.GetInClusterClient()
+	status, err := c.getClusterStatus(kc, ci)
 	if err != nil {
+		logrus.Errorf("get cluster status error: %v", err)
 		return err
 	}
 
-	if err = c.checkNamespace(); err != nil {
-		return err
+	if req.ClusterName == conf.ErdaClusterName() || !(status == statusOffline || status == statusUnknown) {
+		return nil
 	}
 
-	tc, err := 
c.k8s.GetClient(req.ClusterName) - if err != nil { - return err - } + workNs := getWorkerNamespace() - orgDto, err := c.bdl.GetOrg(req.OrgID) - if err != nil { + // check resource before execute cluster init job + if err = c.importPreCheck(kc, workNs); err != nil { return err } - if _, err = tc.ClientSet.CoreV1().ServiceAccounts(conf.ErdaNamespace()).Get(context.Background(), ClusterAgentSA, - metav1.GetOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - logrus.Errorf("get cluster-agent serviceAccount error: %v", err) - return err - } - if _, err = tc.ClientSet.CoreV1().ServiceAccounts(conf.ErdaNamespace()).Create(context.Background(), - &corev1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ServiceAccount", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: ClusterAgentSA, - Namespace: conf.ErdaNamespace(), - }, - }, metav1.CreateOptions{}); err != nil { - return err - } - } - - if _, err = tc.ClientSet.RbacV1().ClusterRoles().Get(context.Background(), ClusterAgentCR, - metav1.GetOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - logrus.Errorf("get cluster-agent cluster role error: %v", err) - return err - } - allRole := []string{"*"} - - if _, err = tc.ClientSet.RbacV1().ClusterRoles().Create(context.Background(), - &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRole", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: ClusterAgentCR, - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: allRole, - APIGroups: allRole, - Resources: allRole, - }, - { - Verbs: allRole, - NonResourceURLs: allRole, - }, - }, - }, metav1.CreateOptions{}); err != nil { - return err - } - } - - if _, err = tc.ClientSet.RbacV1().ClusterRoleBindings().Get(context.Background(), ClusterAgentCRB, - metav1.GetOptions{}); err != nil { - if !k8serrors.IsNotFound(err) { - logrus.Errorf("get cluster-agent cluster role binding error: %v", err) - return err - } - - if _, err = tc.ClientSet.RbacV1().ClusterRoleBindings().Create(context.Background(), - &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRoleBinding", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: ClusterAgentCRB, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: ClusterAgentSA, - Namespace: conf.ErdaNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: ClusterAgentCR, - }, - }, metav1.CreateOptions{}); err != nil { - return err - } + // check init job, if already exist, return + if _, err = kc.ClientSet.BatchV1().Jobs(workNs).Get(context.Background(), + generateInitJobName(req.OrgID, req.ClusterName), metav1.GetOptions{}); err == nil { + return nil } - nodes, err := tc.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + // create init job + initJob, err := c.generateClusterInitJob(req.OrgID, req.ClusterName, false) if err != nil { + logrus.Errorf("generate cluster init job error: %v", err) return err } - for _, node := range nodes.Items { - node.Labels[fmt.Sprintf("dice/org-%s", orgDto.Name)] = "true" - if _, err = tc.ClientSet.CoreV1().Nodes().Update(context.Background(), &node, - metav1.UpdateOptions{}); err != nil { - return err - } - } - - // check init job, if already exist, return - if _, err = cs.BatchV1().Jobs(getPlatformNamespace()).Get(context.Background(), generateInitJobName(req.OrgID, req.ClusterName), - metav1.GetOptions{}); err == nil { - return nil - } - - // create init 
job
-	if _, err = cs.BatchV1().Jobs(getPlatformNamespace()).Create(context.Background(), c.generateClusterInitJob(req.OrgID, req.ClusterName, false),
+	if _, err = kc.ClientSet.BatchV1().Jobs(workNs).Create(context.Background(), initJob,
 		metav1.CreateOptions{}); err != nil {
+		logrus.Errorf("create cluster init job error: %v", err)
 		return err
 	}
 
@@ -281,12 +172,15 @@ func (c *Clusters) ImportClusterWithRecord(userID string, req *apistructs.Import
 }
 
 func (c *Clusters) ClusterInitRetry(orgID uint64, req *apistructs.ClusterInitRetry) error {
-	cs, err := c.k8s.GetInClusterClient()
+	cs, err := k8sclient.New(req.ClusterName)
 	if err != nil {
 		return err
 	}
 
-	if err = c.checkNamespace(); err != nil {
+	logrus.Infof("start retry init cluster %s", req.ClusterName)
+
+	workNs := getWorkerNamespace()
+	if err = c.importPreCheck(cs, workNs); err != nil {
 		return err
 	}
 
@@ -300,7 +194,7 @@ func (c *Clusters) ClusterInitRetry(orgID uint64, req *apistructs.ClusterInitRet
 	default:
 		// delete old init job
 		propagationPolicy := metav1.DeletePropagationBackground
-		if err = cs.BatchV1().Jobs(getPlatformNamespace()).Delete(context.Background(), generateInitJobName(orgID,
+		if err = cs.ClientSet.BatchV1().Jobs(workNs).Delete(context.Background(), generateInitJobName(orgID,
 			req.ClusterName), metav1.DeleteOptions{
 			PropagationPolicy: &propagationPolicy,
 		}); err != nil {
@@ -309,9 +203,15 @@ func (c *Clusters) ClusterInitRetry(orgID uint64, req *apistructs.ClusterInitRet
 			time.Sleep(500 * time.Millisecond)
 			continue
 		}
+		// generate init job
+		initJob, err := c.generateClusterInitJob(orgID, req.ClusterName, true)
+		if err != nil {
+			logrus.Errorf("generate retry cluster init job error: %v", err)
+			continue
+		}
 		// create job, if create error, tip retry again
-		if _, err = cs.BatchV1().Jobs(getPlatformNamespace()).Create(context.Background(),
-			c.generateClusterInitJob(orgID, req.ClusterName, true), metav1.CreateOptions{}); err != nil {
+		if _, err = cs.ClientSet.BatchV1().Jobs(workNs).Create(context.Background(),
+			initJob, metav1.CreateOptions{}); err != nil {
 			return fmt.Errorf("create retry job error: %v, please try again", err)
 		}
 		return nil
@@ -320,27 +220,117 @@ func (c *Clusters) ClusterInitRetry(orgID uint64, req *apistructs.ClusterInitRet
 	}
 }
 
-func (c *Clusters) checkNamespace() error {
-	cs, err := c.k8s.GetInClusterClient()
-	if err != nil {
-		return err
+// importPreCheck checks prerequisites before importing a cluster
+func (c *Clusters) importPreCheck(kc *k8sclient.K8sClient, ns string) error {
+	if kc == nil || kc.ClientSet == nil {
+		return fmt.Errorf("import cluster precheck error, kubernetes client is nil")
 	}
 
-	// check namespace
-	_, err = cs.CoreV1().Namespaces().Get(context.Background(), conf.ErdaNamespace(), metav1.GetOptions{})
-	if err != nil {
+	// check namespace, create it if it doesn't exist
+	if _, err := kc.ClientSet.CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{}); err != nil {
 		if !k8serrors.IsNotFound(err) {
 			return err
 		}
-		if _, err = cs.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
+		if _, err = kc.ClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
 			ObjectMeta: metav1.ObjectMeta{
-				Name: conf.ErdaNamespace(),
+				Name: ns,
 			},
 		}, metav1.CreateOptions{}); err != nil {
 			return err
 		}
 	}
 
+	if _, err := kc.ClientSet.CoreV1().ServiceAccounts(ns).Get(context.Background(), ClusterAgentSA,
+		metav1.GetOptions{}); err != nil {
+		if !k8serrors.IsNotFound(err) {
+			logrus.Errorf("get cluster-agent serviceAccount error: %v", err)
+			return err
+		}
+		logrus.Infof("service account %s doesn't exist, create it", ClusterAgentSA)
logrus.Infof("service account %s doesn't exist, create it", ClusterAgentSA) + if _, err = kc.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.Background(), + &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterAgentSA, + Namespace: conf.ErdaNamespace(), + }, + }, metav1.CreateOptions{}); err != nil { + return err + } + } + + if _, err := kc.ClientSet.RbacV1().ClusterRoles().Get(context.Background(), ClusterAgentCR, + metav1.GetOptions{}); err != nil { + if !k8serrors.IsNotFound(err) { + logrus.Errorf("get cluster-agent cluster role error: %v", err) + return err + } + logrus.Infof("cluster role %s doesn't exist, create it", ClusterAgentCR) + + allRole := []string{"*"} + + if _, err = kc.ClientSet.RbacV1().ClusterRoles().Create(context.Background(), + &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterAgentCR, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: allRole, + APIGroups: allRole, + Resources: allRole, + }, + { + Verbs: allRole, + NonResourceURLs: allRole, + }, + }, + }, metav1.CreateOptions{}); err != nil { + return err + } + } + + if _, err := kc.ClientSet.RbacV1().ClusterRoleBindings().Get(context.Background(), ClusterAgentCRB, + metav1.GetOptions{}); err != nil { + if !k8serrors.IsNotFound(err) { + logrus.Errorf("get cluster-agent cluster role binding error: %v", err) + return err + } + logrus.Infof("cluster role binding %s doesn't exist, create it", ClusterAgentCRB) + + if _, err = kc.ClientSet.RbacV1().ClusterRoleBindings().Create(context.Background(), + &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ClusterAgentCRB, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: ClusterAgentSA, + Namespace: ns, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: ClusterAgentCR, + }, + }, metav1.CreateOptions{}); err != nil { + return err + } + } + return nil } @@ -446,33 +436,7 @@ func (c *Clusters) RenderInitContent(orgName, clusterName string, accessKey stri return "", fmt.Errorf("accesskey is error") } - masterCluster := os.Getenv(apistructs.MasterClusterKey) - if masterCluster == "" { - return "", fmt.Errorf("can't get master cluster info") - } - - ci, err := c.bdl.QueryClusterInfo(masterCluster) - if err != nil { - return "", err - } - - version := ci.Get("DICE_VERSION") - masterClusterDomain := ci.Get("DICE_ROOT_DOMAIN") - mcProtocol := parseClusterProtocol(ci.Get("DICE_PROTOCOL")) - - rd := RenderDeploy{ - ClusterName: clusterName, - MasterClusterDomain: masterClusterDomain, - MasterClusterProtocol: mcProtocol, - PlateFormVersion: version, - CustomDomain: cluster.WildcardDomain, - InitJobImage: renderReleaseImageAddr(ModuleClusterInit, version), - ClusterAgentImage: renderReleaseImageAddr(ModuleClusterAgent, version), - ErdaHelmChartVersion: conf.ErdaHelmChartVersion(), - DialerPublicAddr: conf.DialerPublicAddr(), - ErdaSystem: conf.ErdaNamespace(), - OrgName: orgName, - } + rd, err := c.renderCommonDeployConfig(orgName, clusterName) tmpl := template.Must(template.New("render").Parse(ProxyDeployTemplate)) buf := new(bytes.Buffer) @@ -549,72 +513,27 @@ func ParseManageConfigFromCredential(credentialType string, credential apistruct } // 
-func (c *Clusters) generateClusterInitJob(orgID uint64, clusterName string, reInstall bool) *batchv1.Job {
-	jobName := generateInitJobName(orgID, clusterName)
-	var backOffLimit int32
-
-	compClusterName := os.Getenv(apistructs.MasterClusterKey)
-	if compClusterName == "" {
-		return nil
-	}
+func (c *Clusters) generateClusterInitJob(orgID uint64, clusterName string, reInstall bool) (*batchv1.Job, error) {
+	var (
+		backOffLimit int32
+		jobName      = generateInitJobName(orgID, clusterName)
+	)
 
-	cci, err := c.bdl.QueryClusterInfo(compClusterName)
+	orgDto, err := c.bdl.GetOrg(orgID)
 	if err != nil {
-		return nil
+		return nil, err
 	}
 
-	eci, err := c.bdl.GetCluster(clusterName)
+	rd, err := c.renderCommonDeployConfig(orgDto.Name, clusterName)
 	if err != nil {
-		return nil
+		return nil, err
 	}
 
-	platformDomain := cci.Get("DICE_ROOT_DOMAIN")
-	platformVersion := cci.Get("DICE_VERSION")
-	mcProtocol := parseClusterProtocol(cci.Get("DICE_PROTOCOL"))
-
-	envs := []corev1.EnvVar{
-		{
-			Name:  "ERDA_CHART_VERSION",
-			Value: conf.ErdaHelmChartVersion(),
-		},
-		{
-			Name:  "TARGET_CLUSTER",
-			Value: clusterName,
-		},
-		{
-			Name:  "INSTALL_MODE",
-			Value: "remote",
-		},
-		{
-			Name:  "REPO_MODE",
-			Value: "local",
-		},
-		{
-			Name:  "HELM_NAMESPACE",
-			Value: conf.ErdaNamespace(),
-		},
-		{
-			Name: "CHART_ERDA_BASE_VALUES",
-			Value: fmt.Sprintf("configmap.clustername=%s,configmap.domain=%s",
-				clusterName, eci.WildcardDomain),
-		},
-		{
-			Name:  "CHART_ERDA_ADDONS_VALUES",
-			Value: "registry.networkMode=''",
-		},
-		{
-			Name: "CHART_ERDA_VALUES",
-			Value: fmt.Sprintf("domain=%s,clusterName=%s,masterCluster.domain=%s,masterCluster.protocol=%s",
-				eci.WildcardDomain, clusterName, platformDomain, mcProtocol),
-		},
-		{
-			Name:  "CLUSTER_MANAGER_ADDR",
-			Value: discover.ClusterManager(),
-		},
-		{
+	if reInstall {
+		rd.Envs = append(rd.Envs, corev1.EnvVar{
 			Name:  "REINSTALL",
-			Value: strconv.FormatBool(reInstall),
-		},
+			Value: "true",
+		})
 	}
 
 	return &batchv1.Job{
@@ -624,26 +543,65 @@ func (c *Clusters) generateClusterInitJob(orgID uint64, clusterName string, reIn
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      jobName,
-			Namespace: getPlatformNamespace(),
+			Namespace: getWorkerNamespace(),
 		},
 		Spec: batchv1.JobSpec{
 			BackoffLimit: &backOffLimit,
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
-					RestartPolicy: "Never",
+					ServiceAccountName: ClusterAgentSA,
+					RestartPolicy:      "Never",
 					Containers: []corev1.Container{
 						{
 							Name:            jobName,
-							Image:           renderReleaseImageAddr(ModuleClusterInit, platformVersion),
+							Image:           renderReleaseImageAddr(),
 							ImagePullPolicy: "Always",
-							Command:         []string{"sh", "-c", fmt.Sprintf("/app/%s", ModuleClusterInit)},
-							Env:             envs,
+							Command:         []string{"sh", "-c", fmt.Sprintf("/app/%s", ModuleClusterOps)},
+							Env:             rd.Envs,
 						},
 					},
 				},
 			},
 		},
+	}, nil
+}
+
+// renderCommonDeployConfig renders the deploy struct with the common config
+func (c *Clusters) renderCommonDeployConfig(orgName, clusterName string) (*RenderDeploy, error) {
+	ci, err := c.bdl.GetCluster(clusterName)
+	if err != nil {
+		logrus.Errorf("render deploy config error: %v", err)
+		return nil, err
 	}
+
+	rd := RenderDeploy{
+		ErdaNamespace: getWorkerNamespace(),
+		JobImage:      renderReleaseImageAddr(),
+		Envs: []corev1.EnvVar{
+			{Name: "DEBUG", Value: "true"},
+			{Name: "ERDA_CHART_VERSION", Value: conf.ErdaVersion()},
+			{Name: "HELM_NAMESPACE", Value: getWorkerNamespace()},
+			{Name: "NODE_LABELS", Value: fmt.Sprintf("dice/org-%s=true", orgName)},
+			{Name: "ERDA_CHART_VALUES", Value: generateSetValues(ci)},
+			{Name: "HELM_REPO_URL", Value: conf.HelmRepoURL()},
"HELM_REPO_URL", Value: conf.HelmRepoURL()}, + {Name: "HELM_REPO_USERNAME", Value: conf.HelmRepoUsername()}, + {Name: "HELM_REPO_PASSWORD", Value: conf.HelmRepoPassword()}, + }, + } + + return &rd, nil +} + +// generateSetValues generate the values of helm chart install set +func generateSetValues(ci *apistructs.ClusterInfo) string { + // current cluster type in database is k8s, dice-cluster-info need kubernetes + if ci.Type == "k8s" { + ci.Type = "kubernetes" + } + return "tags.work=true,tags.master=false," + + fmt.Sprintf("global.domain=%s,erda.clusterName=%s,", ci.WildcardDomain, ci.Name) + + fmt.Sprintf("erda.clusterConfig.clusterType=%s,", strings.ToLower(ci.Type)) + + fmt.Sprintf("erda.masterCluster.domain=%s,erda.masterCluster.protocol=%s", conf.ErdaDomain(), conf.ErdaProtocol()) } // generateAccessKey generate accessKey @@ -670,26 +628,19 @@ func generateAccessKey(customLen int) string { return strings.Join(res, "") } -func parseClusterProtocol(protocol string) string { - var ( - protocolHttp = "http" - protocolHttps = "https" - ) - - if strings.Contains(strings.ToLower(protocol), protocolHttps) { - return protocolHttps - } - - return protocolHttp -} - // renderReleaseImageAddr render release image with module name and version -// e.g. registry.erda.cloud/erda:v1.1 -func renderReleaseImageAddr(module string, version string) string { - return fmt.Sprintf("%s/%s:v%s", conf.ReleaseRepo(), module, version) +// e.g. registry.erda.cloud/erda/cluster-init:v0.1 +func renderReleaseImageAddr() string { + return fmt.Sprintf("%s/%s:v%s", conf.ReleaseRegistry(), ModuleClusterOps, conf.ClusterInitVersion()) } // generateInitJobName generate init job name with orgID and clusterName func generateInitJobName(orgID uint64, clusterName string) string { return fmt.Sprintf("erda-cluster-init-%d-%s", orgID, clusterName) } + +// getWorkerNamespace get work node namespace +func getWorkerNamespace() string { + // TODO: support different namespace of master and slave + return conf.ErdaNamespace() +} diff --git a/modules/cmp/impl/clusters/proxy-deploy.go b/modules/cmp/impl/clusters/proxy-deploy.go index 5e4843af536..1165d83af84 100644 --- a/modules/cmp/impl/clusters/proxy-deploy.go +++ b/modules/cmp/impl/clusters/proxy-deploy.go @@ -20,13 +20,13 @@ var ProxyDeployTemplate = ` apiVersion: v1 kind: Namespace metadata: - name: {{.ErdaSystem}} + name: {{ .ErdaNamespace }} --- apiVersion: v1 kind: ServiceAccount metadata: name: cluster-agent - namespace: {{.ErdaSystem}} + namespace: {{ .ErdaNamespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -55,7 +55,7 @@ roleRef: subjects: - kind: ServiceAccount name: cluster-agent - namespace: {{.ErdaSystem}} + namespace: {{ .ErdaNamespace }} --- apiVersion: batch/v1 kind: Job @@ -63,7 +63,7 @@ metadata: labels: job-name: erda-cluster-init name: erda-cluster-init - namespace: {{.ErdaSystem}} + namespace: {{ .ErdaNamespace }} spec: backoffLimit: 0 selector: @@ -79,28 +79,14 @@ spec: containers: - name: init env: - - name: "DEBUG" - value: "true" - - name: "ERDA_CHART_VERSION" - value: "{{.ErdaHelmChartVersion}}" - - name: "INSTALL_MODE" - value: "local" - - name: "REPO_MODE" - value: "local" - - name: "HELM_NAMESPACE" - value: {{.ErdaSystem}} - - name: "NODE_LABELS" - value: "dice/org-{{.OrgName}}=true" - - name: "CHART_ERDA_BASE_VALUES" - value: "configmap.clustername={{.ClusterName}},configmap.domain={{.CustomDomain}}" - - name: "CHART_ERDA_ADDONS_VALUES" - value: "registry.networkMode=''" - - name: "CHART_ERDA_VALUES" - value: 
"domain={{.CustomDomain}},clusterName={{.ClusterName}},masterCluster.domain={{.MasterClusterDomain}},masterCluster.protocol={{.MasterClusterProtocol}}" + {{- range .Envs }} + - name: {{ .Name }} + value: "{{ .Value }}" + {{- end }} command: - sh - -c - - /app/cluster-init - image: {{.InitJobImage}} + - /app/cluster-ops + image: {{ .JobImage }} imagePullPolicy: Always ` diff --git a/modules/cmp/impl/clusters/update_cluster.go b/modules/cmp/impl/clusters/update_cluster.go index d73168624f6..8508255fb0e 100644 --- a/modules/cmp/impl/clusters/update_cluster.go +++ b/modules/cmp/impl/clusters/update_cluster.go @@ -34,8 +34,9 @@ func (c *Clusters) UpdateCluster(req apistructs.CMPClusterUpdateRequest, header mc = cluster.ManageConfig - // if credential content is empty, use latest credential data. - if req.Credential.Content != "" || strings.ToUpper(req.CredentialType) == ProxyType { + // if credential content is empty, use the latest credential data. + // if credential change to agent from other type, clear credential info + if req.Credential.Content != "" || (mc.Type != apistructs.ManageProxy && strings.ToUpper(req.CredentialType) == ProxyType) { // parse manage config from credential info mc, err = ParseManageConfigFromCredential(req.CredentialType, req.Credential) if err != nil { diff --git a/modules/cmp/impl/clusters/upgrade_edge_cluster.go b/modules/cmp/impl/clusters/upgrade_edge_cluster.go index ad995a90526..df8b5876967 100644 --- a/modules/cmp/impl/clusters/upgrade_edge_cluster.go +++ b/modules/cmp/impl/clusters/upgrade_edge_cluster.go @@ -29,17 +29,15 @@ import ( "github.com/erda-project/erda/apistructs" "github.com/erda-project/erda/bundle" "github.com/erda-project/erda/modules/cmp/dbclient" - "github.com/erda-project/erda/modules/cmp/services/kubernetes" ) type Clusters struct { db *dbclient.DBClient bdl *bundle.Bundle - k8s *kubernetes.Kubernetes } func New(db *dbclient.DBClient, bdl *bundle.Bundle) *Clusters { - return &Clusters{db: db, bdl: bdl, k8s: &kubernetes.Kubernetes{}} + return &Clusters{db: db, bdl: bdl} } // status: diff --git a/modules/cmp/initialize.go b/modules/cmp/initialize.go index bcd5f186a7a..b0b2356bf84 100644 --- a/modules/cmp/initialize.go +++ b/modules/cmp/initialize.go @@ -16,7 +16,6 @@ package cmp import ( "context" - "fmt" "os" "strings" "time" @@ -210,19 +209,4 @@ func registerWebHook(bdl *bundle.Bundle) { if err := bdl.CreateWebhook(ev); err != nil { logrus.Warnf("failed to register pipeline tasks event, (%v)", err) } - - clusterEv := apistructs.CreateHookRequest{ - Name: "cmp-clusterhook", - Events: []string{"cluster"}, - URL: fmt.Sprintf("http://%s/api/clusterhook", discover.CMP()), - Active: true, - HookLocation: apistructs.HookLocation{ - Org: "-1", - Project: "-1", - Application: "-1", - }, - } - if err := bdl.CreateWebhook(clusterEv); err != nil { - logrus.Warnf("failed to register cluster event, (%v)", err) - } } diff --git a/modules/cmp/services/kubernetes/kubernetes.go b/modules/cmp/services/kubernetes/kubernetes.go deleted file mode 100644 index 4bb1364409b..00000000000 --- a/modules/cmp/services/kubernetes/kubernetes.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2021 Terminus, Inc. -// -// This program is free software: you can use, redistribute, and/or modify -// it under the terms of the GNU Affero General Public License, version 3 -// or later ("AGPL"), as published by the Free Software Foundation. 
-// -// This program is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package kubernetes - -import ( - "sync" - "time" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/erda-project/erda/pkg/k8sclient" -) - -var ( - InClusterClient *kubernetes.Clientset - defaultTimeout = 2 * time.Second -) - -type Kubernetes struct { - sync.RWMutex - clients map[string]*k8sclient.K8sClient -} - -func (k *Kubernetes) GetCacheClients() map[string]*k8sclient.K8sClient { - return k.clients -} - -func (k *Kubernetes) GetInClusterClient() (*kubernetes.Clientset, error) { - if InClusterClient != nil { - return InClusterClient, nil - } - rc, err := rest.InClusterConfig() - if err != nil { - return nil, err - } - - rc.QPS = 100 - rc.Burst = 100 - - cs, err := kubernetes.NewForConfig(rc) - if err != nil { - return nil, err - } - - return cs, nil -} - -func (k *Kubernetes) CreateClient(clusterName string) error { - nClient, err := k8sclient.NewWithTimeOut(clusterName, defaultTimeout) - if err != nil { - return err - } - - k.writeMap(clusterName, nClient) - - return nil -} - -func (k *Kubernetes) GetClient(clusterName string) (*k8sclient.K8sClient, error) { - client, ok := k.readMap(clusterName) - if !ok { - nClient, err := k8sclient.NewWithTimeOut(clusterName, defaultTimeout) - if err != nil { - return nil, err - } - k.writeMap(clusterName, nClient) - return nClient, nil - } - return client, nil -} - -func (k *Kubernetes) UpdateClient(clusterName string) error { - k.RemoveClient(clusterName) - return k.CreateClient(clusterName) -} - -func (k *Kubernetes) RemoveClient(clusterName string) { - k.Lock() - defer k.Unlock() - delete(k.clients, clusterName) -} - -func (k *Kubernetes) readMap(clusterName string) (*k8sclient.K8sClient, bool) { - k.RLock() - v, ok := k.clients[clusterName] - k.RUnlock() - return v, ok -} - -func (k *Kubernetes) writeMap(clusterName string, client *k8sclient.K8sClient) { - if k.clients == nil { - k.clients = make(map[string]*k8sclient.K8sClient, 0) - } - - k.Lock() - k.clients[clusterName] = client - k.Unlock() -}
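
---

Below is a minimal, standalone sketch of the ERDA_CHART_VALUES string that the new generateSetValues assembles for the single erda chart. The cluster name and domains are illustrative only; in the patch they come from the cluster record (Name, WildcardDomain) and from cmp conf (DICE_ROOT_DOMAIN, DICE_PROTOCOL):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors generateSetValues in import_cluster.go: the database stores the
	// cluster type as "k8s", while the chart expects "kubernetes".
	clusterType := "k8s"
	if clusterType == "k8s" {
		clusterType = "kubernetes"
	}

	// Illustrative values, not taken from this patch.
	wildcardDomain := "dev.example.org"
	clusterName := "erda-dev"
	masterDomain, masterProtocol := "erda.cloud", "https"

	setValues := "tags.work=true,tags.master=false," +
		fmt.Sprintf("global.domain=%s,erda.clusterName=%s,", wildcardDomain, clusterName) +
		fmt.Sprintf("erda.clusterConfig.clusterType=%s,", strings.ToLower(clusterType)) +
		fmt.Sprintf("erda.masterCluster.domain=%s,erda.masterCluster.protocol=%s", masterDomain, masterProtocol)

	// The cluster-ops job reads this via the ERDA_CHART_VALUES env var
	// (SetValues in modules/cluster-ops/config) and passes it to helm.
	fmt.Println(setValues)
}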