From ca36e01603ed72189ec18fd5cabfa78cfba8768c Mon Sep 17 00:00:00 2001
From: Stavros Foteinopoulos
Date: Wed, 13 Jan 2021 16:40:40 +0200
Subject: [PATCH] Remove prometheus-adapter temporarily. (#396)

---
 .../provisioner/kops_provisioner_cluster.go |  17 -
 .../prometheus-adapter.yaml                 | 319 ------------------
 2 files changed, 336 deletions(-)
 delete mode 100644 manifests/prometheus-adapter/prometheus-adapter.yaml

diff --git a/internal/provisioner/kops_provisioner_cluster.go b/internal/provisioner/kops_provisioner_cluster.go
index 943aa6da7..c324196fe 100644
--- a/internal/provisioner/kops_provisioner_cluster.go
+++ b/internal/provisioner/kops_provisioner_cluster.go
@@ -293,20 +293,6 @@ func (provisioner *KopsProvisioner) ProvisionCluster(cluster *model.Cluster, aws
 		return errors.Wrap(err, "failed to delete APIService v1beta1.metrics.k8s.io")
 	}
 
-	logger.Info("Cleaning up some prometheus-adapter resources to reapply")
-	err = k8sClient.Clientset.CoreV1().Services("kube-system").Delete(ctx, "prometheus-adapter", metav1.DeleteOptions{})
-	if k8sErrors.IsNotFound(err) {
-		logger.Info("Service metrics-server not found; skipping...")
-	} else if err != nil {
-		return errors.Wrap(err, "failed to delete service metrics-server")
-	}
-	err = k8sClient.KubeagClientSet.ApiregistrationV1beta1().APIServices().Delete(ctx, "v1beta1.custom.metrics.k8s.io", metav1.DeleteOptions{})
-	if k8sErrors.IsNotFound(err) {
-		logger.Info("APIService v1beta1.metrics.k8s.io not found; skipping...")
-	} else if err != nil {
-		return errors.Wrap(err, "failed to delete APIService v1beta1.custom.metrics.k8s.io")
-	}
-
 	// TODO: determine if we want to hard-code the k8s resource objects in code.
 	// For now, we will ingest manifest files to deploy the mattermost operator.
 	files := []k8s.ManifestFile{
@@ -343,9 +329,6 @@ func (provisioner *KopsProvisioner) ProvisionCluster(cluster *model.Cluster, aws
 		}, {
 			Path:            "manifests/metric-server/metric-server.yaml",
 			DeployNamespace: "kube-system",
-		}, {
-			Path:            "manifests/prometheus-adapter/prometheus-adapter.yaml",
-			DeployNamespace: "kube-system",
 		},
 	}
 	err = k8sClient.CreateFromFiles(files)

diff --git a/manifests/prometheus-adapter/prometheus-adapter.yaml b/manifests/prometheus-adapter/prometheus-adapter.yaml
deleted file mode 100644
index fa2f4446d..000000000
--- a/manifests/prometheus-adapter/prometheus-adapter.yaml
+++ /dev/null
@@ -1,319 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: prometheus-adapter
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-      - namespaces
-      - pods
-      - services
-      - nodes/stats
-      - namespaces
-      - configmaps
-    verbs:
-      - get
-      - list
-      - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  labels:
-    rbac.authorization.k8s.io/aggregate-to-admin: "true"
-    rbac.authorization.k8s.io/aggregate-to-edit: "true"
-    rbac.authorization.k8s.io/aggregate-to-view: "true"
-  name: system:aggregated-metrics-reader
-rules:
-  - apiGroups:
-      - custom.metrics.k8s.io
-      - metrics.k8s.io
-    resources:
-      - pods
-      - nodes
-      - nodes/stats
-      - namespaces
-      - configmaps
-    verbs:
-      - get
-      - list
-      - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: prometheus-adapter
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: prometheus-adapter
-subjects:
-  - kind: ServiceAccount
-    name: prometheus-adapter
-    namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: resource-metrics:system:auth-delegator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:auth-delegator
-subjects:
-  - kind: ServiceAccount
-    name: prometheus-adapter
-    namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: resource-metrics-server-resources
-rules:
-  - apiGroups:
-      # I assumed here should need also metrics.k8s.io but seems it doesn't
-      - custom.metrics.k8s.io
-      - metrics.k8s.io
-    resources:
-      - '*'
-    verbs:
-      - '*'
----
-apiVersion: v1
-data:
-  config.yaml: |
-    rules:
-    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
-      seriesFilters: []
-      resources:
-        overrides:
-          namespace:
-            resource: namespace
-          pod_name:
-            resource: pod
-      name:
-        matches: ^container_(.*)_seconds_total$
-        as: ""
-      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
-    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
-      seriesFilters:
-      - isNot: ^container_.*_seconds_total$
-      resources:
-        overrides:
-          namespace:
-            resource: namespace
-          pod_name:
-            resource: pod
-      name:
-        matches: ^container_(.*)_total$
-        as: ""
-      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
-    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
-      seriesFilters:
-      - isNot: ^container_.*_total$
-      resources:
-        overrides:
-          namespace:
-            resource: namespace
-          pod_name:
-            resource: pod
-      name:
-        matches: ^container_(.*)$
-        as: ""
-      metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>)
-    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
-      seriesFilters:
-      - isNot: .*_total$
-      resources:
-        template: <<.Resource>>
-      name:
-        matches: ""
-        as: ""
-      metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
-    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
-      seriesFilters:
-      - isNot: .*_seconds_total
-      resources:
-        template: <<.Resource>>
-      name:
-        matches: ^(.*)_total$
-        as: ""
-      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
-    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
-      seriesFilters: []
-      resources:
-        template: <<.Resource>>
-      name:
-        matches: ^(.*)_seconds_total$
-        as: ""
-      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
-    resourceRules:
-      cpu:
-        containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
-        nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>)
-        resources:
-          overrides:
-            instance:
-              resource: node
-            namespace:
-              resource: namespace
-            pod_name:
-              resource: pod
-        containerLabel: container_name
-      memory:
-        containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
-        nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
-        resources:
-          overrides:
-            instance:
-              resource: node
-            namespace:
-              resource: namespace
-            pod_name:
-              resource: pod
-        containerLabel: container_name
-      window: 1m
-    externalRules:
-    - seriesQuery: '{__name__=~"^.*_queue_(length|size)$",namespace!=""}'
-      resources:
-        overrides:
-          namespace:
-            resource: namespace
-      name:
-        matches: ^.*_queue_(length|size)$
-        as: "$0"
-      metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
-    - seriesQuery: '{__name__=~"^.*_queue$",namespace!=""}'
-      resources:
-        overrides:
-          namespace:
-            resource: namespace
-      name:
-        matches: ^.*_queue$
-        as: "$0"
-      metricsQuery: max(<<.Series>>{<<.LabelMatchers>>})
-kind: ConfigMap
-metadata:
-  name: adapter-config
-  namespace: kube-system
----
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
-  name: v1beta1.custom.metrics.k8s.io
-spec:
-  group: custom.metrics.k8s.io
-  groupPriorityMinimum: 100
-  insecureSkipTLSVerify: true
-  service:
-    name: prometheus-adapter
-    namespace: kube-system
-  version: v1beta1
-  versionPriority: 100
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: prometheus-adapter
-  namespace: kube-system
-spec:
-  replicas: 0
-  selector:
-    matchLabels:
-      name: prometheus-adapter
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 0
-  template:
-    metadata:
-      labels:
-        name: prometheus-adapter
-    spec:
-      affinity:
-        nodeAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - preference:
-                matchExpressions:
-                  - key: kops.k8s.io/instancegroup
-                    operator: In
-                    values:
-                      - nodes-utilities
-              weight: 1
-      containers:
-        - args:
-            - --cert-dir=/var/run/serving-cert
-            - --config=/etc/adapter/config.yaml
-            - --logtostderr=true
-            - --metrics-relist-interval=1m
-            - --prometheus-url=http://prometheus-operator-kube-p-prometheus.prometheus.svc.cluster.local.:9090/
-            - --secure-port=6443
-          image: quay.io/coreos/k8s-prometheus-adapter-amd64:v0.6.0
-          imagePullPolicy: IfNotPresent
-          name: prometheus-adapter
-          ports:
-            - containerPort: 6443
-          resources:
-            requests:
-              cpu: 600m
-              memory: 717Mi
-            limits:
-              cpu: 1000m
-              memory: 1Gi
-          volumeMounts:
-            - mountPath: /tmp
-              name: tmpfs
-              readOnly: false
-            - mountPath: /var/run/serving-cert
-              name: volume-serving-cert
-              readOnly: false
-            - mountPath: /etc/adapter
-              name: config
-              readOnly: false
-      serviceAccountName: prometheus-adapter
-      volumes:
-        - emptyDir: {}
-          name: tmpfs
-        - emptyDir: {}
-          name: volume-serving-cert
-        - configMap:
-            name: adapter-config
-          name: config
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: resource-metrics-auth-reader
-  namespace: kube-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: extension-apiserver-authentication-reader
-subjects:
-  - kind: ServiceAccount
-    name: prometheus-adapter
-    namespace: kube-system
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    name: prometheus-adapter
-  name: prometheus-adapter
-  namespace: kube-system
-spec:
-  ports:
-    - name: https
-      port: 443
-      targetPort: 6443
-  selector:
-    name: prometheus-adapter
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: prometheus-adapter
-  namespace: kube-system
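
For context on what the first hunk removes: the provisioner deleted the prometheus-adapter Service and the v1beta1.custom.metrics.k8s.io APIService before reapplying manifests, treating a NotFound error as success so the cleanup stayed idempotent across provisioning passes. A minimal, self-contained sketch of that client-go delete-and-tolerate-NotFound pattern follows; the helper name and kubeconfig path are illustrative assumptions, not code from this repository.

package main

import (
	"context"
	"fmt"

	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// deleteServiceIfExists removes a Service, treating NotFound as success so
// the call is safe to repeat on every provisioning pass.
func deleteServiceIfExists(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error {
	err := clientset.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{})
	if k8sErrors.IsNotFound(err) {
		fmt.Printf("Service %s/%s not found; skipping...\n", namespace, name)
		return nil
	}
	return err // nil on success, any other deletion error otherwise
}

func main() {
	// Hypothetical kubeconfig path; the real provisioner constructs its
	// Kubernetes client differently.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	if err := deleteServiceIfExists(context.Background(), clientset, "kube-system", "prometheus-adapter"); err != nil {
		panic(err)
	}
}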