From 14bedfd9cd3a7a17e397f81d251fb095d3a697e7 Mon Sep 17 00:00:00 2001
From: Wilson Júnior
Date: Thu, 25 Aug 2022 15:51:33 -0300
Subject: [PATCH] working on an internal API to integrate with the Prometheus blackbox exporter

---
 main.go                            |  11 +++
 pkg/controllerapi/controllerapi.go | 138 +++++++++++++++++++++++++++++
 2 files changed, 149 insertions(+)
 create mode 100644 pkg/controllerapi/controllerapi.go

diff --git a/main.go b/main.go
index 4235e2c0c..9a718a8a0 100644
--- a/main.go
+++ b/main.go
@@ -7,6 +7,8 @@ package main
 import (
 	"flag"
 	"fmt"
+	"log"
+	"net/http"
 	"os"
 	"time"
 
@@ -16,6 +18,7 @@ import (
 
 	"github.com/tsuru/rpaas-operator/controllers"
 	"github.com/tsuru/rpaas-operator/internal/registry"
+	"github.com/tsuru/rpaas-operator/pkg/controllerapi"
 	extensionsruntime "github.com/tsuru/rpaas-operator/pkg/runtime"
 	// +kubebuilder:scaffold:imports
 )
@@ -25,6 +28,7 @@ var setupLog = ctrl.Log.WithName("setup")
 type configOpts struct {
 	metricsAddr                 string
 	healthAddr                  string
+	internalAPIAddr             string
 	leaderElection              bool
 	leaderElectionNamespace     string
 	leaderElectionResourceName  string
@@ -39,6 +43,7 @@ func (o *configOpts) bindFlags(fs *flag.FlagSet) {
 	// See more: https://github.com/kubernetes-sigs/kubebuilder/issues/1839
 	fs.StringVar(&o.metricsAddr, "metrics-bind-address", ":8080", "The TCP address that controller should bind to for serving Prometheus metrics. It can be set to \"0\" to disable the metrics serving.")
 	fs.StringVar(&o.healthAddr, "health-probe-bind-address", ":8081", "The TCP address that controller should bind to for serving health probes.")
+	fs.StringVar(&o.internalAPIAddr, "internal-api-address", ":8082", "The TCP address that controller should bind to for serving the internal controller API.")
 	fs.BoolVar(&o.leaderElection, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.")
 	fs.StringVar(&o.leaderElectionResourceName, "leader-elect-resource-name", "rpaas-operator-lock", "The name of resource object that is used for locking during leader election.")
@@ -102,6 +107,12 @@ func main() {
 	}
 	// +kubebuilder:scaffold:builder
 
+	// Serve the internal controller API on its own address.
+	go func() {
+		setupLog.Info("starting internalapi", "addr", opts.internalAPIAddr)
+		log.Fatal(http.ListenAndServe(opts.internalAPIAddr, controllerapi.New(mgr.GetClient())))
+	}()
+
 	setupLog.Info("starting manager")
 	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
 		setupLog.Error(err, "problem running manager")
diff --git a/pkg/controllerapi/controllerapi.go b/pkg/controllerapi/controllerapi.go
new file mode 100644
index 000000000..4289d2c3d
--- /dev/null
+++ b/pkg/controllerapi/controllerapi.go
@@ -0,0 +1,138 @@
+package controllerapi
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+
+	"github.com/tsuru/nginx-operator/api/v1alpha1"
+	sigsk8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	coreV1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+)
+
+const healthcheckPath = "/_nginx_healthcheck"
+
+// prometheusDiscoverHandler serves Prometheus target groups discovered from the nginx instances managed by this operator.
+type prometheusDiscoverHandler struct {
+	client sigsk8sclient.Client
+}
+
+// svcMap indexes every Service created by nginx-operator by namespace/name.
+func (h *prometheusDiscoverHandler) svcMap(ctx context.Context) (map[sigsk8sclient.ObjectKey]*coreV1.Service, error) {
+	svcMap := map[sigsk8sclient.ObjectKey]*coreV1.Service{}
+	allNginxServices := &coreV1.ServiceList{}
+	err := h.client.List(ctx, allNginxServices, &sigsk8sclient.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{
+			"nginx.tsuru.io/app": "nginx",
+		}),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	for i, svc := range allNginxServices.Items {
+		svcMap[sigsk8sclient.ObjectKeyFromObject(&svc)] = &allNginxServices.Items[i]
+	}
+
+	return svcMap, nil
+}
+
+// rpaasTargetGroups builds one HTTP target per exposed service and one HTTPS target per TLS hostname.
+func rpaasTargetGroups(svcMap map[sigsk8sclient.ObjectKey]*coreV1.Service, nginxInstance *v1alpha1.Nginx) []TargetGroup {
+	targetGroups := []TargetGroup{}
+
+	for _, service := range nginxInstance.Status.Services {
+
+		svc := svcMap[sigsk8sclient.ObjectKey{
+			Namespace: nginxInstance.Namespace,
+			Name:      service.Name,
+		}]
+
+		if svc == nil {
+			continue
+		}
+
+		if len(svc.Status.LoadBalancer.Ingress) == 0 {
+			continue
+		}
+
+		svcIP := svc.Status.LoadBalancer.Ingress[0].IP
+
+		serviceInstance := svc.Labels["rpaas.extensions.tsuru.io/instance-name"]
+		serviceName := svc.Labels["rpaas.extensions.tsuru.io/service-name"]
+
+		targetGroups = append(targetGroups, TargetGroup{
+			Targets: []string{
+				"http://" + svcIP + healthcheckPath,
+			},
+			Labels: map[string]string{
+				"service_instance": serviceInstance,
+				"service":          serviceName,
+			},
+		})
+
+		for _, tls := range nginxInstance.Spec.TLS {
+			for _, host := range tls.Hosts {
+				targetGroups = append(targetGroups, TargetGroup{
+					Targets: []string{
+						"https://" + svcIP + healthcheckPath,
+					},
+					Labels: map[string]string{
+						"service_instance": serviceInstance,
+						"service":          serviceName,
+						"servername":       host,
+					},
+				})
+			}
+		}
+	}
+
+	return targetGroups
+}
+
+// TargetGroup is a collection of related hosts that Prometheus monitors.
+type TargetGroup struct {
+	Targets []string          `json:"targets"`
+	Labels  map[string]string `json:"labels"`
+}
+
+func (h *prometheusDiscoverHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	svcMap, err := h.svcMap(ctx)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	allNginx := &v1alpha1.NginxList{}
+
+	err = h.client.List(ctx, allNginx, &sigsk8sclient.ListOptions{})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	targetGroups := []TargetGroup{}
+	for _, nginxInstance := range allNginx.Items {
+		targetGroups = append(targetGroups, rpaasTargetGroups(svcMap, &nginxInstance)...)
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	encoder := json.NewEncoder(w)
+	encoder.SetIndent("", "  ")
+	err = encoder.Encode(targetGroups)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+
+// New returns the handler for the internal controller API.
+func New(client sigsk8sclient.Client) http.Handler {
+	mux := http.NewServeMux()
+	mux.Handle("/v1/prometheus/discover", &prometheusDiscoverHandler{client: client})

	return mux
}
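
Note: the discover endpoint emits the same [{"targets": [...], "labels": {...}}]
shape that Prometheus expects from HTTP service discovery, so a blackbox scrape
job should be able to point http_sd_configs straight at /v1/prometheus/discover
and relabel each target into the exporter's target parameter; the servername
label is emitted so HTTPS probes can be matched to the right TLS hostname.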
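
For verification without a cluster, a sketch along these lines should exercise
the handler using controller-runtime's fake client. The instance names,
namespace, and IP below are made up, and it assumes nginx-operator's v1alpha1
package exposes the generated AddToScheme and a ServiceStatus type with a Name
field (as the handler's use of Status.Services suggests):

package controllerapi_test

import (
	"encoding/json"
	"net/http/httptest"
	"testing"

	"github.com/tsuru/nginx-operator/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/tsuru/rpaas-operator/pkg/controllerapi"
)

func TestPrometheusDiscover(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	_ = v1alpha1.AddToScheme(scheme)

	// A LoadBalancer Service as nginx-operator would create it, carrying the
	// selector label and the rpaas labels the handler copies into the output.
	// (Name, namespace, and IP are hypothetical.)
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-instance-service",
			Namespace: "rpaasv2",
			Labels: map[string]string{
				"nginx.tsuru.io/app":                      "nginx",
				"rpaas.extensions.tsuru.io/instance-name": "my-instance",
				"rpaas.extensions.tsuru.io/service-name":  "rpaasv2",
			},
		},
		Status: corev1.ServiceStatus{
			LoadBalancer: corev1.LoadBalancerStatus{
				Ingress: []corev1.LoadBalancerIngress{{IP: "10.1.1.9"}},
			},
		},
	}

	// The Nginx resource whose status points at the Service above.
	nginx := &v1alpha1.Nginx{
		ObjectMeta: metav1.ObjectMeta{Name: "my-instance", Namespace: "rpaasv2"},
		Status: v1alpha1.NginxStatus{
			Services: []v1alpha1.ServiceStatus{{Name: "my-instance-service"}},
		},
	}

	client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(svc, nginx).Build()

	req := httptest.NewRequest("GET", "/v1/prometheus/discover", nil)
	rec := httptest.NewRecorder()
	controllerapi.New(client).ServeHTTP(rec, req)

	var groups []controllerapi.TargetGroup
	if err := json.NewDecoder(rec.Body).Decode(&groups); err != nil {
		t.Fatal(err)
	}
	if len(groups) != 1 || groups[0].Targets[0] != "http://10.1.1.9/_nginx_healthcheck" {
		t.Fatalf("unexpected target groups: %+v", groups)
	}
}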