From c1ff13528080fbfa1240b54deea55f3223d4bd74 Mon Sep 17 00:00:00 2001 From: "Huabing (Robin) Zhao" Date: Fri, 13 Dec 2024 06:19:50 +0800 Subject: [PATCH 01/16] fix: decouple gateway status updates from the reconciler (#4767) * decoup gateway status update Signed-off-by: Huabing Zhao * decoup gatewayclass status update Signed-off-by: Huabing Zhao * fix test Signed-off-by: Huabing Zhao * add comment Signed-off-by: Huabing Zhao * fix test Signed-off-by: Huabing Zhao * fix test Signed-off-by: Huabing Zhao * revert gateway api runner Signed-off-by: Huabing Zhao * update address and programming status Signed-off-by: Huabing Zhao * Revert "update address and programming status" This reverts commit bf3d07ea76c681afab9dbfe16c702475c9b39a67. * avoid overriding the gateway status from Gateway API translator Signed-off-by: Huabing Zhao * minor wording Signed-off-by: Huabing Zhao * minor wording Signed-off-by: Huabing Zhao * only subscribe to status updates upon acquiring leadership Signed-off-by: Huabing Zhao * fix lint Signed-off-by: Huabing Zhao * minor wording Signed-off-by: Huabing Zhao * address comment Signed-off-by: Huabing Zhao * address comment Signed-off-by: Huabing Zhao * minor wording Signed-off-by: Huabing Zhao * fix lint Signed-off-by: Huabing Zhao * minor change Signed-off-by: Huabing Zhao * release note Signed-off-by: Huabing Zhao --------- Signed-off-by: Huabing Zhao --- internal/envoygateway/config/config.go | 12 ++-- internal/infrastructure/runner/runner.go | 8 +-- internal/message/types.go | 13 ++-- internal/provider/kubernetes/controller.go | 34 +++++++---- internal/provider/kubernetes/kubernetes.go | 33 +++++----- internal/provider/kubernetes/predicates.go | 26 +++++++- .../provider/kubernetes/predicates_test.go | 3 + internal/provider/kubernetes/status.go | 61 +++++++++---------- release-notes/current.yaml | 1 + 9 files changed, 112 insertions(+), 79 deletions(-) diff --git a/internal/envoygateway/config/config.go b/internal/envoygateway/config/config.go index c842c184e4c..af05dac0753 100644 --- a/internal/envoygateway/config/config.go +++ b/internal/envoygateway/config/config.go @@ -7,6 +7,7 @@ package config import ( "errors" + "sync" egv1a1 "github.com/envoyproxy/gateway/api/v1alpha1" "github.com/envoyproxy/gateway/api/v1alpha1/validation" @@ -37,19 +38,22 @@ type Server struct { // Logger is the logr implementation used by Envoy Gateway. Logger logging.Logger // Elected chan is used to signal what a leader is elected - Elected chan struct{} + Elected *sync.WaitGroup } // New returns a Server with default parameters. func New() (*Server, error) { - return &Server{ + server := &Server{ EnvoyGateway: egv1a1.DefaultEnvoyGateway(), Namespace: env.Lookup("ENVOY_GATEWAY_NAMESPACE", DefaultNamespace), DNSDomain: env.Lookup("KUBERNETES_CLUSTER_DOMAIN", DefaultDNSDomain), // the default logger Logger: logging.DefaultLogger(egv1a1.LogLevelInfo), - Elected: make(chan struct{}), - }, nil + Elected: &sync.WaitGroup{}, + } + // Block the tasks that are waiting for the leader to be elected + server.Elected.Add(1) + return server, nil } // Validate validates a Server config. 
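Note: the Elected change above replaces the one-shot channel with a *sync.WaitGroup so that any number of runners (the infra runner, and later in this patch the status subscriber) can block until leadership is acquired. Below is a minimal, self-contained sketch of that pattern; it is not the actual Envoy Gateway code, and the type and task names are illustrative only.

package main

import (
	"fmt"
	"sync"
	"time"
)

// server mirrors the shape of the config.Server change: Elected is completed
// (Done) exactly once, when this instance wins leader election.
type server struct {
	Elected *sync.WaitGroup
}

func newServer() *server {
	s := &server{Elected: &sync.WaitGroup{}}
	s.Elected.Add(1) // block leader-only tasks until Done() is called
	return s
}

func main() {
	s := newServer()

	var tasks sync.WaitGroup
	for _, name := range []string{"infra-runner", "status-updater"} {
		tasks.Add(1)
		go func(name string) {
			defer tasks.Done()
			s.Elected.Wait() // every waiter is released by the single Done() below
			fmt.Println(name, "running leader-only work")
		}(name)
	}

	// Simulate winning leader election (the mgr.Elected() channel closing in the real code).
	time.Sleep(10 * time.Millisecond)
	s.Elected.Done()

	tasks.Wait()
}

The design point: a send on an unbuffered channel wakes exactly one receiver and blocks if nobody is listening yet, whereas Wait() on an already-completed WaitGroup returns immediately, so multiple waiters, including late ones, all observe the election exactly once.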
diff --git a/internal/infrastructure/runner/runner.go b/internal/infrastructure/runner/runner.go index 6896a6e5a16..3344ca0d349 100644 --- a/internal/infrastructure/runner/runner.go +++ b/internal/infrastructure/runner/runner.go @@ -72,12 +72,8 @@ func (r *Runner) Start(ctx context.Context) (err error) { if r.EnvoyGateway.Provider.Type == egv1a1.ProviderTypeKubernetes && !ptr.Deref(r.EnvoyGateway.Provider.Kubernetes.LeaderElection.Disable, false) { go func() { - select { - case <-ctx.Done(): - return - case <-r.Elected: - initInfra() - } + r.Elected.Wait() + initInfra() }() return } diff --git a/internal/message/types.go b/internal/message/types.go index 3e3923e6cb2..2eee7f90345 100644 --- a/internal/message/types.go +++ b/internal/message/types.go @@ -75,12 +75,13 @@ func (p *ProviderResources) Close() { // GatewayAPIStatuses contains gateway API resources statuses type GatewayAPIStatuses struct { - GatewayStatuses watchable.Map[types.NamespacedName, *gwapiv1.GatewayStatus] - HTTPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1.HTTPRouteStatus] - GRPCRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1.GRPCRouteStatus] - TLSRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.TLSRouteStatus] - TCPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.TCPRouteStatus] - UDPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.UDPRouteStatus] + GatewayClassStatuses watchable.Map[types.NamespacedName, *gwapiv1.GatewayClassStatus] + GatewayStatuses watchable.Map[types.NamespacedName, *gwapiv1.GatewayStatus] + HTTPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1.HTTPRouteStatus] + GRPCRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1.GRPCRouteStatus] + TLSRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.TLSRouteStatus] + TCPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.TCPRouteStatus] + UDPRouteStatuses watchable.Map[types.NamespacedName, *gwapiv1a2.UDPRouteStatus] } func (s *GatewayAPIStatuses) Close() { diff --git a/internal/provider/kubernetes/controller.go b/internal/provider/kubernetes/controller.go index f71ebee9520..bcb6fa8772c 100644 --- a/internal/provider/kubernetes/controller.go +++ b/internal/provider/kubernetes/controller.go @@ -129,13 +129,21 @@ func newGatewayAPIController(mgr manager.Manager, cfg *config.Server, su Updater } r.log.Info("created gatewayapi controller") - // Subscribe to status updates - r.subscribeAndUpdateStatus(ctx, cfg.EnvoyGateway.EnvoyGatewaySpec.ExtensionManager != nil) - // Watch resources if err := r.watchResources(ctx, mgr, c); err != nil { return fmt.Errorf("error watching resources: %w", err) } + + // When leader election is enabled, only subscribe to status updates upon acquiring leadership. 
+ if cfg.EnvoyGateway.Provider.Type == egv1a1.ProviderTypeKubernetes && + !ptr.Deref(cfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.Disable, false) { + go func() { + cfg.Elected.Wait() + r.subscribeAndUpdateStatus(ctx, cfg.EnvoyGateway.EnvoyGatewaySpec.ExtensionManager != nil) + }() + } else { + r.subscribeAndUpdateStatus(ctx, cfg.EnvoyGateway.EnvoyGatewaySpec.ExtensionManager != nil) + } return nil } @@ -199,9 +207,12 @@ func (r *gatewayAPIReconciler) Reconcile(ctx context.Context, _ reconcile.Reques if managedGC.Spec.ParametersRef != nil && managedGC.DeletionTimestamp == nil { if err := r.processGatewayClassParamsRef(ctx, managedGC, resourceMappings, gwcResource); err != nil { msg := fmt.Sprintf("%s: %v", status.MsgGatewayClassInvalidParams, err) - if err := r.updateStatusForGatewayClass(ctx, managedGC, false, string(gwapiv1.GatewayClassReasonInvalidParameters), msg); err != nil { - r.log.Error(err, "unable to update GatewayClass status") - } + gc := status.SetGatewayClassAccepted( + managedGC.DeepCopy(), + false, + string(gwapiv1.GatewayClassReasonInvalidParameters), + msg) + r.resources.GatewayClassStatuses.Store(utils.NamespacedName(gc), &gc.Status) r.log.Error(err, "failed to process parametersRef for gatewayclass", "name", managedGC.Name) return reconcile.Result{}, err } @@ -293,11 +304,12 @@ func (r *gatewayAPIReconciler) Reconcile(ctx context.Context, _ reconcile.Reques // process envoy gateway secret refs r.processEnvoyProxySecretRef(ctx, gwcResource) - - if err := r.updateStatusForGatewayClass(ctx, managedGC, true, string(gwapiv1.GatewayClassReasonAccepted), status.MsgValidGatewayClass); err != nil { - r.log.Error(err, "unable to update GatewayClass status") - return reconcile.Result{}, err - } + gc := status.SetGatewayClassAccepted( + managedGC.DeepCopy(), + true, + string(gwapiv1.GatewayClassReasonAccepted), + status.MsgValidGatewayClass) + r.resources.GatewayClassStatuses.Store(utils.NamespacedName(gc), &gc.Status) if len(gwcResource.Gateways) == 0 { r.log.Info("No gateways found for accepted gatewayclass") diff --git a/internal/provider/kubernetes/kubernetes.go b/internal/provider/kubernetes/kubernetes.go index 4fdbc329dd0..56f96e70a18 100644 --- a/internal/provider/kubernetes/kubernetes.go +++ b/internal/provider/kubernetes/kubernetes.go @@ -36,40 +36,40 @@ type Provider struct { } // New creates a new Provider from the provided EnvoyGateway. -func New(cfg *rest.Config, svr *ec.Server, resources *message.ProviderResources) (*Provider, error) { +func New(restCfg *rest.Config, svrCfg *ec.Server, resources *message.ProviderResources) (*Provider, error) { // TODO: Decide which mgr opts should be exposed through envoygateway.provider.kubernetes API. 
mgrOpts := manager.Options{ Scheme: envoygateway.GetScheme(), - Logger: svr.Logger.Logger, + Logger: svrCfg.Logger.Logger, HealthProbeBindAddress: ":8081", LeaderElectionID: "5b9825d2.gateway.envoyproxy.io", - LeaderElectionNamespace: svr.Namespace, + LeaderElectionNamespace: svrCfg.Namespace, } log.SetLogger(mgrOpts.Logger) klog.SetLogger(mgrOpts.Logger) - if !ptr.Deref(svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.Disable, false) { + if !ptr.Deref(svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.Disable, false) { mgrOpts.LeaderElection = true - if svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.LeaseDuration != nil { - ld, err := time.ParseDuration(string(*svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.LeaseDuration)) + if svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.LeaseDuration != nil { + ld, err := time.ParseDuration(string(*svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.LeaseDuration)) if err != nil { return nil, err } mgrOpts.LeaseDuration = ptr.To(ld) } - if svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.RetryPeriod != nil { - rp, err := time.ParseDuration(string(*svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.RetryPeriod)) + if svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.RetryPeriod != nil { + rp, err := time.ParseDuration(string(*svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.RetryPeriod)) if err != nil { return nil, err } mgrOpts.RetryPeriod = ptr.To(rp) } - if svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.RenewDeadline != nil { - rd, err := time.ParseDuration(string(*svr.EnvoyGateway.Provider.Kubernetes.LeaderElection.RenewDeadline)) + if svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.RenewDeadline != nil { + rd, err := time.ParseDuration(string(*svrCfg.EnvoyGateway.Provider.Kubernetes.LeaderElection.RenewDeadline)) if err != nil { return nil, err } @@ -78,13 +78,13 @@ func New(cfg *rest.Config, svr *ec.Server, resources *message.ProviderResources) mgrOpts.Controller = config.Controller{NeedLeaderElection: ptr.To(false)} } - if svr.EnvoyGateway.NamespaceMode() { + if svrCfg.EnvoyGateway.NamespaceMode() { mgrOpts.Cache.DefaultNamespaces = make(map[string]cache.Config) - for _, watchNS := range svr.EnvoyGateway.Provider.Kubernetes.Watch.Namespaces { + for _, watchNS := range svrCfg.EnvoyGateway.Provider.Kubernetes.Watch.Namespaces { mgrOpts.Cache.DefaultNamespaces[watchNS] = cache.Config{} } } - mgr, err := ctrl.NewManager(cfg, mgrOpts) + mgr, err := ctrl.NewManager(restCfg, mgrOpts) if err != nil { return nil, fmt.Errorf("failed to create manager: %w", err) } @@ -95,7 +95,7 @@ func New(cfg *rest.Config, svr *ec.Server, resources *message.ProviderResources) } // Create and register the controllers with the manager. - if err := newGatewayAPIController(mgr, svr, updateHandler.Writer(), resources); err != nil { + if err := newGatewayAPIController(mgr, svrCfg, updateHandler.Writer(), resources); err != nil { return nil, fmt.Errorf("failted to create gatewayapi controller: %w", err) } @@ -109,11 +109,10 @@ func New(cfg *rest.Config, svr *ec.Server, resources *message.ProviderResources) return nil, fmt.Errorf("unable to set up ready check: %w", err) } - // Emit elected & continue with envoyObjects of infra resources + // Emit elected & continue with the tasks that require leadership. 
go func() { <-mgr.Elected() - // WARN: DO NOT CLOSE IT - svr.Elected <- struct{}{} + svrCfg.Elected.Done() }() return &Provider{ diff --git a/internal/provider/kubernetes/predicates.go b/internal/provider/kubernetes/predicates.go index d25ec2fb7d4..16bb9361b04 100644 --- a/internal/provider/kubernetes/predicates.go +++ b/internal/provider/kubernetes/predicates.go @@ -294,7 +294,7 @@ func (r *gatewayAPIReconciler) validateServiceForReconcile(obj client.Object) bo // Check if the Service belongs to a Gateway, if so, update the Gateway status. gtw := r.findOwningGateway(ctx, labels) if gtw != nil { - r.updateStatusForGateway(ctx, gtw) + r.updateGatewayStatus(gtw) return false } @@ -528,7 +528,7 @@ func (r *gatewayAPIReconciler) validateObjectForReconcile(obj client.Object) boo // Check if the obj belongs to a Gateway, if so, update the Gateway status. gtw := r.findOwningGateway(ctx, labels) if gtw != nil { - r.updateStatusForGateway(ctx, gtw) + r.updateGatewayStatus(gtw) return false } } @@ -636,12 +636,32 @@ func (r *gatewayAPIReconciler) updateStatusForGatewaysUnderGatewayClass(ctx cont } for _, gateway := range gateways.Items { - r.updateStatusForGateway(ctx, &gateway) + r.updateGatewayStatus(&gateway) } return nil } +// updateGatewayStatus triggers a status update for the Gateway. +func (r *gatewayAPIReconciler) updateGatewayStatus(gateway *gwapiv1.Gateway) { + gwName := utils.NamespacedName(gateway) + status := &gateway.Status + // Use the existing status if it exists to avoid losing the status calculated by the Gateway API translator. + if existing, ok := r.resources.GatewayStatuses.Load(gwName); ok { + status = existing + } + + // Since the status does not reflect the actual changed status, we need to delete it first + // to prevent it from being considered unchanged. This ensures that subscribers receive the update event. + r.resources.GatewayStatuses.Delete(gwName) + // The status that is stored in the GatewayStatuses GatewayStatuses is solely used to trigger the status updater + // and does not reflect the real changed status. + // + // The status updater will check the Envoy Proxy service to get the addresses of the Gateway, + // and check the Envoy Proxy Deployment/DaemonSet to get the status of the Gateway workload. 
+ r.resources.GatewayStatuses.Store(gwName, status) +} + func (r *gatewayAPIReconciler) handleNode(obj client.Object) bool { ctx := context.Background() node, ok := obj.(*corev1.Node) diff --git a/internal/provider/kubernetes/predicates_test.go b/internal/provider/kubernetes/predicates_test.go index d8abf845f4d..8ff155f46f4 100644 --- a/internal/provider/kubernetes/predicates_test.go +++ b/internal/provider/kubernetes/predicates_test.go @@ -26,6 +26,7 @@ import ( "github.com/envoyproxy/gateway/internal/gatewayapi/resource" "github.com/envoyproxy/gateway/internal/infrastructure/kubernetes/proxy" "github.com/envoyproxy/gateway/internal/logging" + "github.com/envoyproxy/gateway/internal/message" "github.com/envoyproxy/gateway/internal/provider/kubernetes/test" ) @@ -854,6 +855,7 @@ func TestValidateServiceForReconcile(t *testing.T) { classController: egv1a1.GatewayControllerName, log: logger, mergeGateways: sets.New[string]("test-mg"), + resources: &message.ProviderResources{}, grpcRouteCRDExists: true, tcpRouteCRDExists: true, udpRouteCRDExists: true, @@ -972,6 +974,7 @@ func TestValidateObjectForReconcile(t *testing.T) { classController: egv1a1.GatewayControllerName, log: logger, mergeGateways: sets.New[string]("test-mg"), + resources: &message.ProviderResources{}, } for _, tc := range testCases { diff --git a/internal/provider/kubernetes/status.go b/internal/provider/kubernetes/status.go index a59eb82f75a..d9ff03f9b66 100644 --- a/internal/provider/kubernetes/status.go +++ b/internal/provider/kubernetes/status.go @@ -10,7 +10,6 @@ import ( "fmt" "reflect" - kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,6 +27,35 @@ import ( // subscribeAndUpdateStatus subscribes to gateway API object status updates and // writes it into the Kubernetes API Server. func (r *gatewayAPIReconciler) subscribeAndUpdateStatus(ctx context.Context, extensionManagerEnabled bool) { + // GatewayClass object status updater + go func() { + message.HandleSubscription( + message.Metadata{Runner: string(egv1a1.LogComponentProviderRunner), Message: "gatewayclass-status"}, + r.resources.GatewayClassStatuses.Subscribe(ctx), + func(update message.Update[types.NamespacedName, *gwapiv1.GatewayClassStatus], errChan chan error) { + // skip delete updates. 
+ if update.Delete { + return + } + + r.statusUpdater.Send(Update{ + NamespacedName: update.Key, + Resource: new(gwapiv1.GatewayClass), + Mutator: MutatorFunc(func(obj client.Object) client.Object { + gc, ok := obj.(*gwapiv1.GatewayClass) + if !ok { + panic(fmt.Sprintf("unsupported object type %T", obj)) + } + gcCopy := gc.DeepCopy() + gcCopy.Status = *update.Value + return gcCopy + }), + }) + }, + ) + r.log.Info("gatewayclass status subscriber shutting down") + }() + // Gateway object status updater go func() { message.HandleSubscription( @@ -564,34 +592,3 @@ func (r *gatewayAPIReconciler) updateStatusForGateway(ctx context.Context, gtw * }), }) } - -func (r *gatewayAPIReconciler) updateStatusForGatewayClass( - ctx context.Context, - gc *gwapiv1.GatewayClass, - accepted bool, - reason, - msg string, -) error { - if r.statusUpdater != nil { - r.statusUpdater.Send(Update{ - NamespacedName: types.NamespacedName{Name: gc.Name}, - Resource: &gwapiv1.GatewayClass{}, - Mutator: MutatorFunc(func(obj client.Object) client.Object { - gc, ok := obj.(*gwapiv1.GatewayClass) - if !ok { - panic(fmt.Sprintf("unsupported object type %T", obj)) - } - - return status.SetGatewayClassAccepted(gc.DeepCopy(), accepted, reason, msg) - }), - }) - } else { - // this branch makes testing easier by not going through the status.Updater. - duplicate := status.SetGatewayClassAccepted(gc.DeepCopy(), accepted, reason, msg) - - if err := r.client.Status().Update(ctx, duplicate); err != nil && !kerrors.IsNotFound(err) { - return fmt.Errorf("error updating status of gatewayclass %s: %w", duplicate.Name, err) - } - } - return nil -} diff --git a/release-notes/current.yaml b/release-notes/current.yaml index 123759f7a84..3f281a27737 100644 --- a/release-notes/current.yaml +++ b/release-notes/current.yaml @@ -22,6 +22,7 @@ bug fixes: | Fixed BackendTLSPolicy didn't support using port name as the sectionName in the targetRefs Fixed reference grant from EnvoyExtensionPolicy to referenced ext-proc backend not respected Fixed BackendTrafficPolicy not applying to Gateway Route when Route has a Request Timeout defined + Fixed proxies connected to the secondary EG were not receiving xDS configuration # Enhancements that improve performance. performance improvements: | From 1843a611ed1e11643462a14bbb255afddc99f544 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:11:49 -0800 Subject: [PATCH 02/16] build(deps): bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /tools/src/helm-docs (#4902) build(deps): bump golang.org/x/crypto in /tools/src/helm-docs Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.21.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.21.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: zirain --- tools/src/helm-docs/go.mod | 6 +++--- tools/src/helm-docs/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/src/helm-docs/go.mod b/tools/src/helm-docs/go.mod index 05180f8b0f0..387049355d4 100644 --- a/tools/src/helm-docs/go.mod +++ b/tools/src/helm-docs/go.mod @@ -30,9 +30,9 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.15.2 // indirect diff --git a/tools/src/helm-docs/go.sum b/tools/src/helm-docs/go.sum index 74600b6b954..9c334edb251 100644 --- a/tools/src/helm-docs/go.sum +++ b/tools/src/helm-docs/go.sum @@ -232,8 +232,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -362,8 +362,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -376,8 +376,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 
h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From b9ff29c7d31b32a9e2eebcc48bc4977e54e11aa9 Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Thu, 12 Dec 2024 18:19:23 -0800 Subject: [PATCH 03/16] Fix Weighted Invalid Backend Logic (#4911) * Fix Weighted Invaid Backend Logic We were not adding valid clusters where there was a invalid cluster Regression from https://github.com/envoyproxy/gateway/pull/3246 Signed-off-by: Arko Dasgupta * add release note Signed-off-by: Arko Dasgupta --------- Signed-off-by: Arko Dasgupta --- internal/xds/translator/route.go | 36 ++++++++----------- ...route-weighted-invalid-backend.routes.yaml | 2 ++ release-notes/current.yaml | 1 + 3 files changed, 18 insertions(+), 21 deletions(-) diff --git a/internal/xds/translator/route.go b/internal/xds/translator/route.go index 330484d41d6..414e76b8366 100644 --- a/internal/xds/translator/route.go +++ b/internal/xds/translator/route.go @@ -244,39 +244,33 @@ func buildXdsWeightedRouteAction(backendWeights *ir.BackendWeights, settings []* Weight: &wrapperspb.UInt32Value{Value: backendWeights.Invalid}, } weightedClusters = append(weightedClusters, invalidCluster) - return &routev3.RouteAction{ - // Intentionally route to a non-existent cluster and return a 500 error when it is not found - ClusterNotFoundResponseCode: routev3.RouteAction_INTERNAL_SERVER_ERROR, - ClusterSpecifier: &routev3.RouteAction_WeightedClusters{ - WeightedClusters: &routev3.WeightedCluster{ - Clusters: weightedClusters, - }, - }, - } } for _, destinationSetting := range settings { - if destinationSetting.Filters != nil { + if len(destinationSetting.Endpoints) > 0 { validCluster := &routev3.WeightedCluster_ClusterWeight{ Name: backendWeights.Name, Weight: &wrapperspb.UInt32Value{Value: *destinationSetting.Weight}, } - if len(destinationSetting.Filters.AddRequestHeaders) > 0 { - validCluster.RequestHeadersToAdd = append(validCluster.RequestHeadersToAdd, buildXdsAddedHeaders(destinationSetting.Filters.AddRequestHeaders)...) - } + if destinationSetting.Filters != nil { + if len(destinationSetting.Filters.AddRequestHeaders) > 0 { + validCluster.RequestHeadersToAdd = append(validCluster.RequestHeadersToAdd, buildXdsAddedHeaders(destinationSetting.Filters.AddRequestHeaders)...) + } - if len(destinationSetting.Filters.RemoveRequestHeaders) > 0 { - validCluster.RequestHeadersToRemove = append(validCluster.RequestHeadersToRemove, destinationSetting.Filters.RemoveRequestHeaders...) - } + if len(destinationSetting.Filters.RemoveRequestHeaders) > 0 { + validCluster.RequestHeadersToRemove = append(validCluster.RequestHeadersToRemove, destinationSetting.Filters.RemoveRequestHeaders...) + } - if len(destinationSetting.Filters.AddResponseHeaders) > 0 { - validCluster.ResponseHeadersToAdd = append(validCluster.ResponseHeadersToAdd, buildXdsAddedHeaders(destinationSetting.Filters.AddResponseHeaders)...) 
- } + if len(destinationSetting.Filters.AddResponseHeaders) > 0 { + validCluster.ResponseHeadersToAdd = append(validCluster.ResponseHeadersToAdd, buildXdsAddedHeaders(destinationSetting.Filters.AddResponseHeaders)...) + } - if len(destinationSetting.Filters.RemoveResponseHeaders) > 0 { - validCluster.ResponseHeadersToRemove = append(validCluster.ResponseHeadersToRemove, destinationSetting.Filters.RemoveResponseHeaders...) + if len(destinationSetting.Filters.RemoveResponseHeaders) > 0 { + validCluster.ResponseHeadersToRemove = append(validCluster.ResponseHeadersToRemove, destinationSetting.Filters.RemoveResponseHeaders...) + } } + weightedClusters = append(weightedClusters, validCluster) } } diff --git a/internal/xds/translator/testdata/out/xds-ir/http-route-weighted-invalid-backend.routes.yaml b/internal/xds/translator/testdata/out/xds-ir/http-route-weighted-invalid-backend.routes.yaml index 6b53d359a22..235dea42729 100644 --- a/internal/xds/translator/testdata/out/xds-ir/http-route-weighted-invalid-backend.routes.yaml +++ b/internal/xds/translator/testdata/out/xds-ir/http-route-weighted-invalid-backend.routes.yaml @@ -16,3 +16,5 @@ clusters: - name: invalid-backend-cluster weight: 1 + - name: first-route-dest + weight: 1 diff --git a/release-notes/current.yaml b/release-notes/current.yaml index 3f281a27737..44d8f3fbc36 100644 --- a/release-notes/current.yaml +++ b/release-notes/current.yaml @@ -23,6 +23,7 @@ bug fixes: | Fixed reference grant from EnvoyExtensionPolicy to referenced ext-proc backend not respected Fixed BackendTrafficPolicy not applying to Gateway Route when Route has a Request Timeout defined Fixed proxies connected to the secondary EG were not receiving xDS configuration + Fixed traffic splitting when some backends were invalid # Enhancements that improve performance. performance improvements: | From 7ba17172b491434714af3dbf22eb17d03d900113 Mon Sep 17 00:00:00 2001 From: "Huabing (Robin) Zhao" Date: Fri, 13 Dec 2024 10:57:01 +0800 Subject: [PATCH 04/16] chore: support k8s v1.32.x (#4898) --- .github/workflows/build_and_test.yaml | 12 ++++++------ .github/workflows/experimental_conformance.yaml | 2 +- .github/workflows/latest_release.yaml | 2 +- .github/workflows/release.yaml | 2 +- release-notes/current.yaml | 1 + site/content/en/news/releases/matrix.md | 2 +- site/content/zh/latest/install/matrix.md | 2 +- tools/hack/create-cluster.sh | 2 +- 8 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build_and_test.yaml b/.github/workflows/build_and_test.yaml index 7791b180b0c..222cfbc42bd 100644 --- a/.github/workflows/build_and_test.yaml +++ b/.github/workflows/build_and_test.yaml @@ -85,7 +85,7 @@ jobs: strategy: fail-fast: false matrix: - version: [ v1.28.13, v1.29.8, v1.30.4, v1.31.0 ] + version: [ v1.29.10, v1.30.6, v1.31.4, v1.32.0 ] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./tools/github-actions/setup-deps @@ -115,14 +115,14 @@ jobs: fail-fast: false matrix: target: - - version: v1.28.13 + - version: v1.29.10 ipFamily: ipv4 - - version: v1.29.8 + - version: v1.30.6 ipFamily: ipv4 - - version: v1.30.4 + - version: v1.31.4 ipFamily: ipv6 # only run ipv6 test on this version to save time # TODO: this's IPv4 first, need a way to test IPv6 first. 
- - version: v1.31.0 + - version: v1.32.0 ipFamily: dual # only run dual test on latest version to save time steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -163,7 +163,7 @@ jobs: # Benchmark - name: Run Benchmark tests env: - KIND_NODE_TAG: v1.28.13 + KIND_NODE_TAG: v1.29.10 IMAGE_PULL_POLICY: IfNotPresent # Args for benchmark test BENCHMARK_RPS: 10000 diff --git a/.github/workflows/experimental_conformance.yaml b/.github/workflows/experimental_conformance.yaml index e2b43edfbba..f2de92a63b7 100644 --- a/.github/workflows/experimental_conformance.yaml +++ b/.github/workflows/experimental_conformance.yaml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - version: [ v1.28.13, v1.29.8, v1.30.4, v1.31.0 ] + version: [ v1.29.10, v1.30.6, v1.31.4, v1.32.0 ] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./tools/github-actions/setup-deps diff --git a/.github/workflows/latest_release.yaml b/.github/workflows/latest_release.yaml index b6e7d4c6d3f..47de6b9af60 100644 --- a/.github/workflows/latest_release.yaml +++ b/.github/workflows/latest_release.yaml @@ -31,7 +31,7 @@ jobs: # Benchmark - name: Run Benchmark tests env: - KIND_NODE_TAG: v1.28.13 + KIND_NODE_TAG: v1.29.10 IMAGE_PULL_POLICY: IfNotPresent # Args for benchmark test BENCHMARK_RPS: 10000 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 7aea9e9d700..47636612dbf 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -24,7 +24,7 @@ jobs: # Benchmark - name: Run Benchmark tests env: - KIND_NODE_TAG: v1.28.13 + KIND_NODE_TAG: v1.29.10 IMAGE_PULL_POLICY: IfNotPresent # Args for benchmark test BENCHMARK_RPS: 10000 diff --git a/release-notes/current.yaml b/release-notes/current.yaml index 44d8f3fbc36..c012a49f04a 100644 --- a/release-notes/current.yaml +++ b/release-notes/current.yaml @@ -36,3 +36,4 @@ deprecations: | # Other notable changes not covered by the above sections. Other changes: | [SecurityPolicy] Modify the JWT Provider Issuer validation constraint + Add support for Kubernetes 1.32.x in the test matrix, and remove support for Kubernetes 1.28.x. 
diff --git a/site/content/en/news/releases/matrix.md b/site/content/en/news/releases/matrix.md index 8fbb021322a..d798d85fb40 100644 --- a/site/content/en/news/releases/matrix.md +++ b/site/content/en/news/releases/matrix.md @@ -7,7 +7,7 @@ Envoy Gateway relies on the Envoy Proxy and the Gateway API, and runs within a K | Envoy Gateway version | Envoy Proxy version | Rate Limit version | Gateway API version | Kubernetes version | |-----------------------|-----------------------------|--------------------|---------------------|----------------------------| -| latest | **dev-latest** | **master** | **v1.2.0** | v1.28, v1.29, v1.30, v1.31 | +| latest | **dev-latest** | **master** | **v1.2.0** | v1.29, v1.30, v1.31, v1.32 | | v1.2 | **distroless-v1.32.1** | **28b1629a** | **v1.2.0** | v1.28, v1.29, v1.30, v1.31 | | v1.1 | **distroless-v1.31.0** | **91484c59** | **v1.1.0** | v1.27, v1.28, v1.29, v1.30 | | v1.0 | **distroless-v1.29.2** | **19f2079f** | **v1.0.0** | v1.26, v1.27, v1.28, v1.29 | diff --git a/site/content/zh/latest/install/matrix.md b/site/content/zh/latest/install/matrix.md index aa5a7e79cdd..54143a09df2 100644 --- a/site/content/zh/latest/install/matrix.md +++ b/site/content/zh/latest/install/matrix.md @@ -15,4 +15,4 @@ Envoy Gateway 依赖于 Envoy Proxy 和 Gateway API,并在 Kubernetes 集群 | v0.4.0 | **v1.26-latest** | **542a6047** | **v0.6.2** | v1.25, v1.26, v1.27 | | v0.3.0 | **v1.25-latest** | **f28024e3** | **v0.6.1** | v1.24, v1.25, v1.26 | | v0.2.0 | **v1.23-latest** | | **v0.5.1** | v1.24 | -| latest | **dev-latest** | **master** | **v1.0.0** | v1.26, v1.27, v1.28, v1.29 | +| latest | **dev-latest** | **master** | **v1.0.0** | v1.29, v1.30, v1.31, v1.32 | diff --git a/tools/hack/create-cluster.sh b/tools/hack/create-cluster.sh index d1601cb83b6..c779aa0d6f3 100755 --- a/tools/hack/create-cluster.sh +++ b/tools/hack/create-cluster.sh @@ -5,7 +5,7 @@ set -euo pipefail # Setup default values CLUSTER_NAME=${CLUSTER_NAME:-"envoy-gateway"} METALLB_VERSION=${METALLB_VERSION:-"v0.13.10"} -KIND_NODE_TAG=${KIND_NODE_TAG:-"v1.31.0"} +KIND_NODE_TAG=${KIND_NODE_TAG:-"v1.32.0"} NUM_WORKERS=${NUM_WORKERS:-""} IP_FAMILY=${IP_FAMILY:-"ipv4"} From 260e4e573b3dbd78202e40e960c74ee220f4a894 Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Thu, 12 Dec 2024 18:59:20 -0800 Subject: [PATCH 05/16] update docsy version (#4914) update docsy Signed-off-by: Arko Dasgupta --- site/package.json | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/site/package.json b/site/package.json index 2ea4bc1f1ae..843d2e60434 100644 --- a/site/package.json +++ b/site/package.json @@ -1,25 +1,28 @@ { "name": "docsy-example-site", - "version": "0.7.1", + "version": "0.10.0", + "version.next": "0.10.1-dev.0-unreleased", "description": "Example site that uses Docsy theme for technical documentation.", "repository": "github:google/docsy-example", "homepage": "https://example.docsy.dev", "author": "Docsy Authors", "license": "Apache-2.0", "bugs": "https://github.com/google/docsy-example/issues", - "spelling": "cSpell:ignore HTMLTEST precheck postbuild -", + "spelling": "cSpell:ignore docsy hugo htmltest precheck postbuild rtlcss -", "scripts": { - "_build": "npm run _hugo-dev", + "_build": "npm run _hugo-dev --", "_check:links": "echo IMPLEMENTATION PENDING for check-links; echo", "_hugo": "hugo --cleanDestinationDir", - "_hugo-dev": "npm run _hugo -- -e dev -DFE --baseURL http://localhost --bind 0.0.0.0", - "_serve": "npm run _hugo-dev -- --minify serve", + "_hugo-dev": "npm 
run _hugo -- -e dev -DFE", + "_local": "npx cross-env HUGO_MODULE_WORKSPACE=docsy.work", + "_serve": "npm run _hugo-dev -- --minify serve --renderToMemory", "build:preview": "npm run _hugo-dev -- --minify --baseURL \"${DEPLOY_PRIME_URL:-/}\"", "build:production": "npm run _hugo -- --minify", - "build": "npm run _build", + "build": "npm run _build -- ", "check:links:all": "HTMLTEST_ARGS= npm run _check:links", "check:links": "npm run _check:links", "clean": "rm -Rf public/* resources", + "local": "npm run _local -- npm run", "make:public": "git init -b main public", "precheck:links:all": "npm run build", "precheck:links": "npm run build", @@ -27,12 +30,23 @@ "postbuild:production": "npm run _check:links", "serve": "npm run _serve", "test": "npm run check:links", - "update:pkg:dep": "npm install --save-dev autoprefixer@latest postcss-cli@latest", - "update:pkg:hugo": "npm install --save-dev --save-exact hugo-extended@latest" + "update:dep": "npm install --save-dev autoprefixer@latest postcss-cli@latest", + "update:hugo": "npm install --save-dev --save-exact hugo-extended@latest", + "update:pkgs": "npx npm-check-updates -u" }, "devDependencies": { - "autoprefixer": "^10.4.14", - "hugo-extended": "0.128.0", - "postcss-cli": "^11.0.0" + "autoprefixer": "^10.4.20", + "cross-env": "^7.0.3", + "hugo-extended": "0.136.2", + "postcss-cli": "^11.0.0", + "rtlcss": "^4.3.0" + }, + "optionalDependencies": { + "npm-check-updates": "^17.1.4" + }, + "private": true, + "prettier": { + "proseWrap": "always", + "singleQuote": true } } From 23d5c9fe8bb108a093270792dd1d8a8cd89bfc15 Mon Sep 17 00:00:00 2001 From: "Huabing (Robin) Zhao" Date: Fri, 13 Dec 2024 11:04:48 +0800 Subject: [PATCH 06/16] v1.2.4 release note (#4915) * v1.2.4 release note Signed-off-by: Huabing Zhao * fix grammar Signed-off-by: Huabing Zhao * minor change Signed-off-by: Huabing Zhao * minor change Signed-off-by: Huabing Zhao * minor change Signed-off-by: Huabing Zhao --------- Signed-off-by: Huabing Zhao --- VERSION | 2 +- release-notes/current.yaml | 5 ----- release-notes/v1.2.4.yaml | 11 +++++++++++ site/content/en/news/releases/notes/v1.2.4.md | 16 ++++++++++++++++ 4 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 release-notes/v1.2.4.yaml create mode 100644 site/content/en/news/releases/notes/v1.2.4.md diff --git a/VERSION b/VERSION index 4367f900087..c7cd5b26796 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.2.3 +v1.2.4 diff --git a/release-notes/current.yaml b/release-notes/current.yaml index c012a49f04a..29e3b8fbdb6 100644 --- a/release-notes/current.yaml +++ b/release-notes/current.yaml @@ -19,11 +19,6 @@ new features: | # Fixes for bugs identified in previous versions. bug fixes: | - Fixed BackendTLSPolicy didn't support using port name as the sectionName in the targetRefs - Fixed reference grant from EnvoyExtensionPolicy to referenced ext-proc backend not respected - Fixed BackendTrafficPolicy not applying to Gateway Route when Route has a Request Timeout defined - Fixed proxies connected to the secondary EG were not receiving xDS configuration - Fixed traffic splitting when some backends were invalid # Enhancements that improve performance. performance improvements: | diff --git a/release-notes/v1.2.4.yaml b/release-notes/v1.2.4.yaml new file mode 100644 index 00000000000..a188d54aec7 --- /dev/null +++ b/release-notes/v1.2.4.yaml @@ -0,0 +1,11 @@ +date: December 13, 2024 + +bug fixes: | + Fixed BackendTLSPolicy not supporting the use of a port name as the sectionName in targetRefs. 
+ Fixed reference grant from EnvoyExtensionPolicy to the referenced ext-proc backend not being respected. + Fixed BackendTrafficPolicy not applying to Gateway Routes when a Route has a Request Timeout defined. + Fixed proxies connected to the secondary Envoy Gateway not receiving xDS configuration. + Fixed traffic splitting not working when some backends were invalid. + +Other changes: | + Bumped Envoy to version 1.32.2. diff --git a/site/content/en/news/releases/notes/v1.2.4.md b/site/content/en/news/releases/notes/v1.2.4.md new file mode 100644 index 00000000000..6ec26d2c2e6 --- /dev/null +++ b/site/content/en/news/releases/notes/v1.2.4.md @@ -0,0 +1,16 @@ +--- +title: "v1.2.4" +publishdate: 2024-12-13 +--- + +Date: December 13, 2024 + +## Bug fixes +- Fixed BackendTLSPolicy not supporting the use of a port name as the sectionName in targetRefs. +- Fixed reference grant from EnvoyExtensionPolicy to the referenced ext-proc backend not being respected. +- Fixed BackendTrafficPolicy not applying to Gateway Routes when a Route has a Request Timeout defined. +- Fixed proxies connected to the secondary Envoy Gateway not receiving xDS configuration. +- Fixed traffic splitting not working when some backends were invalid. + +## Other changes +- Bumped Envoy to version 1.32.2. From dede41502fc5799e1d22d52e42a7a4b23fb6a13b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 11:30:43 +0800 Subject: [PATCH 07/16] build(deps): bump golang.org/x/crypto from 0.22.0 to 0.31.0 in /tools/src/crd-ref-docs (#4903) build(deps): bump golang.org/x/crypto in /tools/src/crd-ref-docs Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.22.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.22.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: zirain --- tools/src/crd-ref-docs/go.mod | 12 ++++++------ tools/src/crd-ref-docs/go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tools/src/crd-ref-docs/go.mod b/tools/src/crd-ref-docs/go.mod index 5d5bcd374a3..009bccd7145 100644 --- a/tools/src/crd-ref-docs/go.mod +++ b/tools/src/crd-ref-docs/go.mod @@ -29,13 +29,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.20.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/tools/src/crd-ref-docs/go.sum b/tools/src/crd-ref-docs/go.sum index 8bfb30cec1a..57b2304c45a 100644 --- a/tools/src/crd-ref-docs/go.sum +++ b/tools/src/crd-ref-docs/go.sum @@ -95,8 +95,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= @@ -105,30 +105,30 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From d33b617d970771f3d04689812246e7165c1be6af Mon Sep 17 00:00:00 2001 From: "Huabing (Robin) Zhao" Date: Fri, 13 Dec 2024 12:39:11 +0800 Subject: [PATCH 08/16] docs: update site link to 1.2.4 (#4918) update site link to 1.2.4 Signed-off-by: Huabing Zhao --- site/layouts/shortcodes/helm-version.html | 4 ++-- site/layouts/shortcodes/yaml-version.html | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/site/layouts/shortcodes/helm-version.html b/site/layouts/shortcodes/helm-version.html index b21ca9586b8..433b5e1bf46 100644 --- a/site/layouts/shortcodes/helm-version.html +++ b/site/layouts/shortcodes/helm-version.html @@ -6,8 +6,8 @@ {{- "v1.1.4" -}} {{- end -}} {{- with (strings.HasPrefix $pagePrefix "v1.2") -}} -{{- "v1.2.3" -}} +{{- "v1.2.4" -}} {{- end -}} {{- with 
(strings.HasPrefix $pagePrefix "doc") -}} -{{- "v1.2.3" -}} +{{- "v1.2.4" -}} {{- end -}} diff --git a/site/layouts/shortcodes/yaml-version.html b/site/layouts/shortcodes/yaml-version.html index d68a435454c..6de788e0b65 100644 --- a/site/layouts/shortcodes/yaml-version.html +++ b/site/layouts/shortcodes/yaml-version.html @@ -6,8 +6,8 @@ {{- "v1.1.4" -}} {{- end -}} {{- with (strings.HasPrefix $pagePrefix "v1.2") -}} -{{- "v1.2.3" -}} +{{- "v1.2.4" -}} {{- end -}} {{- with (strings.HasPrefix $pagePrefix "doc") -}} -{{- "v1.2.3" -}} +{{- "v1.2.4" -}} {{- end -}} From e6a74f8ac825b272c656f1e80f82098d56f9da54 Mon Sep 17 00:00:00 2001 From: zirain Date: Fri, 13 Dec 2024 12:47:53 +0800 Subject: [PATCH 09/16] chore: bump and fix gen (#4917) * build(deps): bump sigs.k8s.io/controller-runtime from 0.19.2 to 0.19.3 Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.19.2 to 0.19.3. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.19.2...v0.19.3) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * fix gen Signed-off-by: zirain * build(deps): bump go.opentelemetry.io/proto/otlp Bumps the go-opentelemetry-io group with 1 update in the / directory: [go.opentelemetry.io/proto/otlp](https://github.com/open-telemetry/opentelemetry-proto-go). Updates `go.opentelemetry.io/proto/otlp` from 1.3.1 to 1.4.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-proto-go/releases) - [Commits](https://github.com/open-telemetry/opentelemetry-proto-go/compare/v1.3.1...v1.4.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/proto/otlp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] * fix gen Signed-off-by: zirain * add google.golang.org Signed-off-by: zirain --------- Signed-off-by: dependabot[bot] Signed-off-by: zirain Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/dependabot.yml | 2 ++ examples/extension-server/go.mod | 6 +++--- examples/extension-server/go.sum | 12 ++++++------ go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 5 files changed, 29 insertions(+), 27 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 938a0fa2c49..03b7824c4da 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -30,12 +30,14 @@ updates: k8s.io: patterns: - "k8s.io/*" + - "sigs.k8s.io/*" go.opentelemetry.io: patterns: - "go.opentelemetry.io/*" golang.org: patterns: - "golang.org/*" + - "google.golang.org/*" - package-ecosystem: pip directories: - /tools/src/codespell diff --git a/examples/extension-server/go.mod b/examples/extension-server/go.mod index 1c648fd94fc..9d2994afdb0 100644 --- a/examples/extension-server/go.mod +++ b/examples/extension-server/go.mod @@ -9,7 +9,7 @@ require ( google.golang.org/grpc v1.68.1 google.golang.org/protobuf v1.35.2 k8s.io/apimachinery v0.31.3 - sigs.k8s.io/controller-runtime v0.19.2 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/gateway-api v1.2.1 ) @@ -34,8 +34,8 @@ require ( golang.org/x/net v0.32.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/examples/extension-server/go.sum b/examples/extension-server/go.sum index 3904c3d700d..2c209e9586d 100644 --- a/examples/extension-server/go.sum +++ b/examples/extension-server/go.sum @@ -106,10 +106,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= @@ -132,8 +132,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20240821151609-f90d01438635 h1:2wThSvJoW/Ncn9TmQEYXRnevZXi2duqHWf5OX9S3zjI= k8s.io/utils v0.0.0-20240821151609-f90d01438635/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM= sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/go.mod b/go.mod index 08376c948bd..19e5233b471 100644 --- a/go.mod +++ b/go.mod @@ -49,12 +49,12 @@ require ( go.opentelemetry.io/otel/metric v1.32.0 go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/sdk/metric v1.32.0 - go.opentelemetry.io/proto/otlp v1.3.1 + go.opentelemetry.io/proto/otlp v1.4.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e golang.org/x/net v0.32.0 golang.org/x/sys v0.28.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 + google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 google.golang.org/grpc v1.68.1 google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v3 v3.0.1 @@ -67,7 +67,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.31.3 k8s.io/utils v0.0.0-20240821151609-f90d01438635 - sigs.k8s.io/controller-runtime v0.19.2 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/gateway-api v1.2.1 sigs.k8s.io/kubectl-validate v0.0.5-0.20240827210056-ce13d95db263 sigs.k8s.io/mcs-api v0.1.0 @@ -159,7 +159,7 @@ require ( github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.5.0 // indirect @@ -266,14 +266,14 @@ require ( golang.org/x/crypto v0.31.0 // indirect golang.org/x/crypto/x509roots/fallback v0.0.0-20240904212608-c9da6b9a4008 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect 
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 08b3df5cc9f..93c0c577c2d 100644 --- a/go.sum +++ b/go.sum @@ -448,8 +448,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -918,8 +918,8 @@ go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiy go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -985,8 +985,8 @@ golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1086,10 +1086,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -1213,8 +1213,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM= sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= From 920a13cafe16de646e7e260ea2ebb29a701b96b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 09:25:53 +0800 Subject: [PATCH 10/16] build(deps): bump github/codeql-action from 3.27.6 to 3.27.9 (#4921) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.6 to 3.27.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/aa578102511db1f4524ed59b8cc2bae4f6e88195...df409f7d9260372bd5f19e5b04e83cb3c43714ae) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d29fc81234f..a0f9fd211f1 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -36,14 +36,14 @@ jobs: - uses: ./tools/github-actions/setup-deps - name: Initialize CodeQL - uses: github/codeql-action/init@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/init@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/autobuild@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/analyze@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 07f19a968f2..8023a5d51c0 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -40,6 +40,6 @@ jobs: retention-days: 5 - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 + uses: github/codeql-action/upload-sarif@df409f7d9260372bd5f19e5b04e83cb3c43714ae # v3.27.9 with: sarif_file: results.sarif From 172cbb286fd391a42b54ff515e0d02a56a21eaf1 Mon Sep 17 00:00:00 2001 From: keithfz Date: Fri, 13 Dec 2024 20:27:06 -0500 Subject: [PATCH 11/16] feat: support patching on EnvoyProxy.spec.provider.kubernetes.envoyHpa and EnvoyProxy.spec.provider.kubernetes.envoyPDB (#4910) * Add patch field for envoyHPA and envoyPDB in EnvoyGateway API Signed-off-by: keithfz * It's actually currently the envoyPDB field, not envoyPdb Signed-off-by: keithfz * fix comment Signed-off-by: keithfz * fix error messages Signed-off-by: keithfz * Add validation for hpa and pdb Signed-off-by: keithfz * lint and gen-check Signed-off-by: keithfz * adding test coverage Signed-off-by: keithfz * lint Signed-off-by: keithfz --------- Signed-off-by: keithfz --- api/v1alpha1/kubernetes_helpers.go | 74 +++++++ api/v1alpha1/shared_types.go | 10 + .../validation/envoyproxy_validate.go | 38 ++++ .../validation/envoyproxy_validate_test.go | 186 ++++++++++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 10 + .../gateway.envoyproxy.io_envoyproxies.yaml | 34 ++++ .../kubernetes/proxy/resource_provider.go | 23 ++- .../proxy/resource_provider_test.go | 52 +++++ .../proxy/testdata/hpa/patch-json-hpa.yaml | 21 ++ .../testdata/hpa/patch-strategic-hpa.yaml | 21 ++ .../proxy/testdata/pdb/patch-json-pdb.yaml | 15 ++ .../testdata/pdb/patch-strategic-pdb.yaml | 10 + release-notes/current.yaml | 1 + site/content/en/latest/api/extension_types.md | 4 + site/content/zh/latest/api/extension_types.md | 4 + 15 files changed, 497 insertions(+), 6 deletions(-) create mode 100644 internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-json-hpa.yaml create mode 100644 internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-strategic-hpa.yaml create mode 100644 internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-json-pdb.yaml create mode 100644 
internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-strategic-pdb.yaml diff --git a/api/v1alpha1/kubernetes_helpers.go b/api/v1alpha1/kubernetes_helpers.go index 1ac790b9c13..761f880d29b 100644 --- a/api/v1alpha1/kubernetes_helpers.go +++ b/api/v1alpha1/kubernetes_helpers.go @@ -11,7 +11,9 @@ import ( jsonpatch "github.com/evanphx/json-patch" appsv1 "k8s.io/api/apps/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/utils/ptr" @@ -263,3 +265,75 @@ func (service *KubernetesServiceSpec) ApplyMergePatch(old *corev1.Service) (*cor return &patchedService, nil } + +// ApplyMergePatch applies a merge patch to a HorizontalPodAutoscaler based on the merge type +func (hpa *KubernetesHorizontalPodAutoscalerSpec) ApplyMergePatch(old *autoscalingv2.HorizontalPodAutoscaler) (*autoscalingv2.HorizontalPodAutoscaler, error) { + if hpa.Patch == nil { + return old, nil + } + + var patchedJSON []byte + var err error + + // Serialize the current HPA to JSON + originalJSON, err := json.Marshal(old) + if err != nil { + return nil, fmt.Errorf("error marshaling original HorizontalPodAutoscaler: %w", err) + } + + switch { + case hpa.Patch.Type == nil || *hpa.Patch.Type == StrategicMerge: + patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, hpa.Patch.Value.Raw, autoscalingv2.HorizontalPodAutoscaler{}) + case *hpa.Patch.Type == JSONMerge: + patchedJSON, err = jsonpatch.MergePatch(originalJSON, hpa.Patch.Value.Raw) + default: + return nil, fmt.Errorf("unsupported merge type: %s", *hpa.Patch.Type) + } + if err != nil { + return nil, fmt.Errorf("error applying merge patch: %w", err) + } + + // Deserialize the patched JSON into a new HorizontalPodAutoscaler object + var patchedHpa autoscalingv2.HorizontalPodAutoscaler + if err := json.Unmarshal(patchedJSON, &patchedHpa); err != nil { + return nil, fmt.Errorf("error unmarshaling patched HorizontalPodAutoscaler: %w", err) + } + + return &patchedHpa, nil +} + +// ApplyMergePatch applies a merge patch to a PodDisruptionBudget based on the merge type +func (pdb *KubernetesPodDisruptionBudgetSpec) ApplyMergePatch(old *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, error) { + if pdb.Patch == nil { + return old, nil + } + + var patchedJSON []byte + var err error + + // Serialize the PDB deployment to JSON + originalJSON, err := json.Marshal(old) + if err != nil { + return nil, fmt.Errorf("error marshaling original PodDisruptionBudget: %w", err) + } + + switch { + case pdb.Patch.Type == nil || *pdb.Patch.Type == StrategicMerge: + patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, pdb.Patch.Value.Raw, policyv1.PodDisruptionBudget{}) + case *pdb.Patch.Type == JSONMerge: + patchedJSON, err = jsonpatch.MergePatch(originalJSON, pdb.Patch.Value.Raw) + default: + return nil, fmt.Errorf("unsupported merge type: %s", *pdb.Patch.Type) + } + if err != nil { + return nil, fmt.Errorf("error applying merge patch: %w", err) + } + + // Deserialize the patched JSON into a new HorizontalPodAutoscaler object + var patchedPdb policyv1.PodDisruptionBudget + if err := json.Unmarshal(patchedJSON, &patchedPdb); err != nil { + return nil, fmt.Errorf("error unmarshaling patched PodDisruptionBudget: %w", err) + } + + return &patchedPdb, nil +} diff --git a/api/v1alpha1/shared_types.go b/api/v1alpha1/shared_types.go index b79839a7dda..036054dc47e 100644 --- a/api/v1alpha1/shared_types.go +++ 
b/api/v1alpha1/shared_types.go @@ -406,6 +406,11 @@ type KubernetesPodDisruptionBudgetSpec struct { // and resilience during maintenance operations. // +optional MinAvailable *int32 `json:"minAvailable,omitempty"` + + // Patch defines how to perform the patch operation to the PodDisruptionBudget + // + // +optional + Patch *KubernetesPatchSpec `json:"patch,omitempty"` } // KubernetesHorizontalPodAutoscalerSpec defines Kubernetes Horizontal Pod Autoscaler settings of Envoy Proxy Deployment. @@ -443,6 +448,11 @@ type KubernetesHorizontalPodAutoscalerSpec struct { // // +optional Behavior *autoscalingv2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"` + + // Patch defines how to perform the patch operation to the HorizontalPodAutoscaler + // + // +optional + Patch *KubernetesPatchSpec `json:"patch,omitempty"` } // HTTPStatus defines the http status code. diff --git a/api/v1alpha1/validation/envoyproxy_validate.go b/api/v1alpha1/validation/envoyproxy_validate.go index 74ce4e0451c..a13fdacbd3d 100644 --- a/api/v1alpha1/validation/envoyproxy_validate.go +++ b/api/v1alpha1/validation/envoyproxy_validate.go @@ -72,6 +72,14 @@ func validateProvider(spec *egv1a1.EnvoyProxySpec) []error { if len(validateDeploymentErrs) != 0 { errs = append(errs, validateDeploymentErrs...) } + validateHpaErrors := validateHpa(spec) + if len(validateHpaErrors) != 0 { + errs = append(errs, validateHpaErrors...) + } + validatePdbErrors := validatePdb(spec) + if len(validatePdbErrors) != 0 { + errs = append(errs, validatePdbErrors...) + } validateServiceErrs := validateService(spec) if len(validateServiceErrs) != 0 { errs = append(errs, validateServiceErrs...) @@ -95,6 +103,36 @@ func validateDeployment(spec *egv1a1.EnvoyProxySpec) []error { return errs } +func validateHpa(spec *egv1a1.EnvoyProxySpec) []error { + var errs []error + if spec.Provider.Kubernetes != nil && spec.Provider.Kubernetes.EnvoyHpa != nil { + if patch := spec.Provider.Kubernetes.EnvoyHpa.Patch; patch != nil { + if patch.Value.Raw == nil { + errs = append(errs, fmt.Errorf("envoy hpa patch object cannot be empty")) + } + if patch.Type != nil && *patch.Type != egv1a1.JSONMerge && *patch.Type != egv1a1.StrategicMerge { + errs = append(errs, fmt.Errorf("unsupported envoy hpa patch type %s", *patch.Type)) + } + } + } + return errs +} + +func validatePdb(spec *egv1a1.EnvoyProxySpec) []error { + var errs []error + if spec.Provider.Kubernetes != nil && spec.Provider.Kubernetes.EnvoyPDB != nil { + if patch := spec.Provider.Kubernetes.EnvoyPDB.Patch; patch != nil { + if patch.Value.Raw == nil { + errs = append(errs, fmt.Errorf("envoy pdb patch object cannot be empty")) + } + if patch.Type != nil && *patch.Type != egv1a1.JSONMerge && *patch.Type != egv1a1.StrategicMerge { + errs = append(errs, fmt.Errorf("unsupported envoy pdb patch type %s", *patch.Type)) + } + } + } + return errs +} + // TODO: remove this function if CEL validation became stable func validateService(spec *egv1a1.EnvoyProxySpec) []error { var errs []error diff --git a/api/v1alpha1/validation/envoyproxy_validate_test.go b/api/v1alpha1/validation/envoyproxy_validate_test.go index e4b400b34dd..8a784db59ab 100644 --- a/api/v1alpha1/validation/envoyproxy_validate_test.go +++ b/api/v1alpha1/validation/envoyproxy_validate_test.go @@ -450,6 +450,192 @@ func TestValidateEnvoyProxy(t *testing.T) { }, expected: true, }, + { + name: "should be valid when pdb patch type and patch are empty", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + 
Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyPDB: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Value: apiextensionsv1.JSON{ + Raw: []byte{}, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "should be valid when pdb patch and type are set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyPDB: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{}"), + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "should be invalid when pdb patch not set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyPDB: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "should be invalid when pdb type not set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyPDB: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + }, + }, + }, + }, + }, + }, + expected: false, + }, + { + name: "should be valid when hpa patch and type are empty", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyHpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Value: apiextensionsv1.JSON{ + Raw: []byte{}, + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "should be valid when hpa patch and type are set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyHpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{}"), + }, + }, + }, + }, + }, + }, + }, + expected: true, + }, + { + name: "should be invalid when hpa patch not set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyHpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + }, + }, + }, + }, + }, + }, + expected: 
false, + }, + { + name: "should be invalid when hpa type not set", + proxy: &egv1a1.EnvoyProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: egv1a1.EnvoyProxySpec{ + Provider: &egv1a1.EnvoyProxyProvider{ + Type: egv1a1.ProviderTypeKubernetes, + Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyHpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + }, + }, + }, + }, + }, + }, + expected: false, + }, { name: "should invalid when patch object is empty", proxy: &egv1a1.EnvoyProxy{ diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 742ffed1b25..dbc28e6aca2 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -3515,6 +3515,11 @@ func (in *KubernetesHorizontalPodAutoscalerSpec) DeepCopyInto(out *KubernetesHor *out = new(v2.HorizontalPodAutoscalerBehavior) (*in).DeepCopyInto(*out) } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = new(KubernetesPatchSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesHorizontalPodAutoscalerSpec. @@ -3556,6 +3561,11 @@ func (in *KubernetesPodDisruptionBudgetSpec) DeepCopyInto(out *KubernetesPodDisr *out = new(int32) **out = **in } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = new(KubernetesPatchSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesPodDisruptionBudgetSpec. diff --git a/charts/gateway-helm/crds/generated/gateway.envoyproxy.io_envoyproxies.yaml b/charts/gateway-helm/crds/generated/gateway.envoyproxy.io_envoyproxies.yaml index 84fb126a79b..1a262dd466b 100644 --- a/charts/gateway-helm/crds/generated/gateway.envoyproxy.io_envoyproxies.yaml +++ b/charts/gateway-helm/crds/generated/gateway.envoyproxy.io_envoyproxies.yaml @@ -10109,6 +10109,23 @@ spec: x-kubernetes-validations: - message: minReplicas must be greater than 0 rule: self > 0 + patch: + description: Patch defines how to perform the patch operation + to the HorizontalPodAutoscaler + properties: + type: + description: |- + Type is the type of merge operation to perform + + By default, StrategicMerge is used as the patch type. + type: string + value: + description: Object contains the raw configuration + for merged object + x-kubernetes-preserve-unknown-fields: true + required: + - value + type: object required: - maxReplicas type: object @@ -10126,6 +10143,23 @@ spec: and resilience during maintenance operations. format: int32 type: integer + patch: + description: Patch defines how to perform the patch operation + to the PodDisruptionBudget + properties: + type: + description: |- + Type is the type of merge operation to perform + + By default, StrategicMerge is used as the patch type. 
+ type: string + value: + description: Object contains the raw configuration + for merged object + x-kubernetes-preserve-unknown-fields: true + required: + - value + type: object type: object envoyService: description: |- diff --git a/internal/infrastructure/kubernetes/proxy/resource_provider.go b/internal/infrastructure/kubernetes/proxy/resource_provider.go index 9830bafad71..9c25886a6bf 100644 --- a/internal/infrastructure/kubernetes/proxy/resource_provider.go +++ b/internal/infrastructure/kubernetes/proxy/resource_provider.go @@ -433,13 +433,13 @@ func (r *ResourceRender) PodDisruptionBudgetSpec() (*egv1a1.KubernetesPodDisrupt } func (r *ResourceRender) PodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) { - podDisruptionBudget, er := r.PodDisruptionBudgetSpec() + podDisruptionBudgetConfig, err := r.PodDisruptionBudgetSpec() // If podDisruptionBudget config is nil or MinAvailable is nil, ignore PodDisruptionBudget. - if podDisruptionBudget == nil { - return nil, er + if podDisruptionBudgetConfig == nil { + return nil, err } - return &policyv1.PodDisruptionBudget{ + podDisruptionBudget := &policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: r.Name(), Namespace: r.Namespace, @@ -449,10 +449,17 @@ func (r *ResourceRender) PodDisruptionBudget() (*policyv1.PodDisruptionBudget, e Kind: "PodDisruptionBudget", }, Spec: policyv1.PodDisruptionBudgetSpec{ - MinAvailable: &intstr.IntOrString{IntVal: ptr.Deref(podDisruptionBudget.MinAvailable, 0)}, + MinAvailable: &intstr.IntOrString{IntVal: ptr.Deref(podDisruptionBudgetConfig.MinAvailable, 0)}, Selector: r.stableSelector(), }, - }, nil + } + + // apply merge patch to PodDisruptionBudget + if podDisruptionBudget, err = podDisruptionBudgetConfig.ApplyMergePatch(podDisruptionBudget); err != nil { + return nil, err + } + + return podDisruptionBudget, nil } // HorizontalPodAutoscalerSpec returns the `HorizontalPodAutoscaler` sets spec. 
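For reference, a minimal and purely illustrative EnvoyProxy manifest exercising the new patch fields could look roughly like the sketch below; the resource name, namespace, label, and patched values are made up, and the StrategicMerge/JSONMerge strings follow the merge-type constants referenced in the validation code added by this commit.

apiVersion: gateway.envoyproxy.io/v1alpha1
kind: EnvoyProxy
metadata:
  name: custom-proxy-config       # illustrative name and namespace
  namespace: envoy-gateway-system
spec:
  provider:
    type: Kubernetes
    kubernetes:
      envoyPDB:
        minAvailable: 1
        patch:
          # adds an illustrative label via a strategic merge patch
          type: StrategicMerge
          value:
            metadata:
              labels:
                example.com/team: platform
      envoyHpa:
        minReplicas: 1
        maxReplicas: 3
        patch:
          # tweaks scale-down behavior via a JSON merge patch
          type: JSONMerge
          value:
            spec:
              behavior:
                scaleDown:
                  stabilizationWindowSeconds: 300

When patch.type is omitted, the ApplyMergePatch helpers above fall back to a strategic merge; per validateHpa and validatePdb, patch.value must be provided and an unrecognized patch type is rejected.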
@@ -506,6 +513,10 @@ func (r *ResourceRender) HorizontalPodAutoscaler() (*autoscalingv2.HorizontalPod hpa.Spec.ScaleTargetRef.Name = r.Name() } + if hpa, err = hpaConfig.ApplyMergePatch(hpa); err != nil { + return nil, err + } + return hpa, nil } diff --git a/internal/infrastructure/kubernetes/proxy/resource_provider_test.go b/internal/infrastructure/kubernetes/proxy/resource_provider_test.go index ad286bfc930..0f5f6e3bf27 100644 --- a/internal/infrastructure/kubernetes/proxy/resource_provider_test.go +++ b/internal/infrastructure/kubernetes/proxy/resource_provider_test.go @@ -1298,6 +1298,32 @@ func TestPDB(t *testing.T) { MinAvailable: ptr.To(int32(1)), }, }, + { + caseName: "patch-json-pdb", + infra: newTestInfra(), + pdb: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + MinAvailable: ptr.To(int32(1)), + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.JSONMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{\"metadata\":{\"name\":\"foo\"}, \"spec\": {\"selector\": {\"matchLabels\": {\"app\": \"bar\"}}}}"), + }, + }, + }, + }, + { + caseName: "patch-strategic-pdb", + infra: newTestInfra(), + pdb: &egv1a1.KubernetesPodDisruptionBudgetSpec{ + MinAvailable: ptr.To(int32(1)), + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{\"metadata\":{\"name\":\"foo\"}, \"spec\": {\"selector\": {\"matchLabels\": {\"app\": \"bar\"}}}}"), + }, + }, + }, + }, } for _, tc := range cases { @@ -1375,6 +1401,32 @@ func TestHorizontalPodAutoscaler(t *testing.T) { }, }, }, + { + caseName: "patch-json-hpa", + infra: newTestInfra(), + hpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + MaxReplicas: ptr.To[int32](1), + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.JSONMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{\"metadata\":{\"name\":\"foo\"}, \"spec\": {\"scaleTargetRef\": {\"name\": \"bar\"}}}"), + }, + }, + }, + }, + { + caseName: "patch-strategic-hpa", + infra: newTestInfra(), + hpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ + MaxReplicas: ptr.To[int32](1), + Patch: &egv1a1.KubernetesPatchSpec{ + Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{\"metadata\":{\"name\":\"foo\"}, \"spec\": {\"metrics\": [{\"resource\": {\"name\": \"cpu\", \"target\": {\"averageUtilization\": 50, \"type\": \"Utilization\"}}, \"type\": \"Resource\"}]}}"), + }, + }, + }, + }, { caseName: "with-deployment-name", infra: newTestInfra(), diff --git a/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-json-hpa.yaml b/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-json-hpa.yaml new file mode 100644 index 00000000000..38d3d474d81 --- /dev/null +++ b/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-json-hpa.yaml @@ -0,0 +1,21 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + labels: + gateway.envoyproxy.io/owning-gateway-name: default + gateway.envoyproxy.io/owning-gateway-namespace: default + name: foo + namespace: envoy-gateway-system +spec: + metrics: + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + maxReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: bar diff --git a/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-strategic-hpa.yaml b/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-strategic-hpa.yaml new file mode 100644 index 00000000000..24a9f6f3a1d --- /dev/null +++ 
b/internal/infrastructure/kubernetes/proxy/testdata/hpa/patch-strategic-hpa.yaml @@ -0,0 +1,21 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + labels: + gateway.envoyproxy.io/owning-gateway-name: default + gateway.envoyproxy.io/owning-gateway-namespace: default + name: foo + namespace: envoy-gateway-system +spec: + metrics: + - resource: + name: cpu + target: + averageUtilization: 50 + type: Utilization + type: Resource + maxReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: envoy-default-37a8eec1 diff --git a/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-json-pdb.yaml b/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-json-pdb.yaml new file mode 100644 index 00000000000..cc4aa473337 --- /dev/null +++ b/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-json-pdb.yaml @@ -0,0 +1,15 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: foo + namespace: envoy-gateway-system +spec: + minAvailable: 1 + selector: + matchLabels: + app: bar + app.kubernetes.io/component: proxy + app.kubernetes.io/managed-by: envoy-gateway + app.kubernetes.io/name: envoy + gateway.envoyproxy.io/owning-gateway-name: default + gateway.envoyproxy.io/owning-gateway-namespace: default diff --git a/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-strategic-pdb.yaml b/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-strategic-pdb.yaml new file mode 100644 index 00000000000..20a25b7e1b0 --- /dev/null +++ b/internal/infrastructure/kubernetes/proxy/testdata/pdb/patch-strategic-pdb.yaml @@ -0,0 +1,10 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: foo + namespace: envoy-gateway-system +spec: + minAvailable: 1 + selector: + matchLabels: + app: bar diff --git a/release-notes/current.yaml b/release-notes/current.yaml index 29e3b8fbdb6..4d61dd6b19f 100644 --- a/release-notes/current.yaml +++ b/release-notes/current.yaml @@ -16,6 +16,7 @@ security updates: | new features: | Added support for trusted CIDRs in the ClientIPDetectionSettings API Added support for sending attributes to external processor in EnvoyExtensionPolicy API + Added support for patching EnvoyProxy.spec.provider.kubernetes.envoyHpa and EnvoyProxy.spec.provider.kubernetes.envoyPDB # Fixes for bugs identified in previous versions. bug fixes: | diff --git a/site/content/en/latest/api/extension_types.md b/site/content/en/latest/api/extension_types.md index c6a7121d7ca..5119d756646 100644 --- a/site/content/en/latest/api/extension_types.md +++ b/site/content/en/latest/api/extension_types.md @@ -2529,6 +2529,7 @@ _Appears in:_ | `maxReplicas` | _integer_ | true | maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
It cannot be less than minReplicas. | | `metrics` | _[MetricSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#metricspec-v2-autoscaling) array_ | false | metrics contains the specifications for which to use to calculate the
desired replica count (the maximum replica count across all metrics will
be used).
If left empty, it defaults to being based on CPU utilization with average on 80% usage. | | `behavior` | _[HorizontalPodAutoscalerBehavior](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#horizontalpodautoscalerbehavior-v2-autoscaling)_ | false | behavior configures the scaling behavior of the target
in both Up and Down directions (scaleUp and scaleDown fields respectively).
If not set, the default HPAScalingRules for scale up and scale down are used.
See k8s.io.autoscaling.v2.HorizontalPodAutoScalerBehavior. | +| `patch` | _[KubernetesPatchSpec](#kubernetespatchspec)_ | false | Patch defines how to perform the patch operation to the HorizontalPodAutoscaler | #### KubernetesPatchSpec @@ -2542,6 +2543,8 @@ Note also that, currently, strings containing literal JSON are _rejected_. _Appears in:_ - [KubernetesDaemonSetSpec](#kubernetesdaemonsetspec) - [KubernetesDeploymentSpec](#kubernetesdeploymentspec) +- [KubernetesHorizontalPodAutoscalerSpec](#kuberneteshorizontalpodautoscalerspec) +- [KubernetesPodDisruptionBudgetSpec](#kubernetespoddisruptionbudgetspec) - [KubernetesServiceSpec](#kubernetesservicespec) | Field | Type | Required | Description | @@ -2562,6 +2565,7 @@ _Appears in:_ | Field | Type | Required | Description | | --- | --- | --- | --- | | `minAvailable` | _integer_ | false | MinAvailable specifies the minimum number of pods that must be available at all times during voluntary disruptions,
such as node drains or updates. This setting ensures that your envoy proxy maintains a certain level of availability
and resilience during maintenance operations. | +| `patch` | _[KubernetesPatchSpec](#kubernetespatchspec)_ | false | Patch defines how to perform the patch operation to the PodDisruptionBudget | #### KubernetesPodSpec diff --git a/site/content/zh/latest/api/extension_types.md b/site/content/zh/latest/api/extension_types.md index c6a7121d7ca..5119d756646 100644 --- a/site/content/zh/latest/api/extension_types.md +++ b/site/content/zh/latest/api/extension_types.md @@ -2529,6 +2529,7 @@ _Appears in:_ | `maxReplicas` | _integer_ | true | maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
It cannot be less than minReplicas. | | `metrics` | _[MetricSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#metricspec-v2-autoscaling) array_ | false | metrics contains the specifications for which to use to calculate the
desired replica count (the maximum replica count across all metrics will
be used).
If left empty, it defaults to being based on CPU utilization with average on 80% usage. | | `behavior` | _[HorizontalPodAutoscalerBehavior](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#horizontalpodautoscalerbehavior-v2-autoscaling)_ | false | behavior configures the scaling behavior of the target
in both Up and Down directions (scaleUp and scaleDown fields respectively).
If not set, the default HPAScalingRules for scale up and scale down are used.
See k8s.io.autoscaling.v2.HorizontalPodAutoScalerBehavior. | +| `patch` | _[KubernetesPatchSpec](#kubernetespatchspec)_ | false | Patch defines how to perform the patch operation to the HorizontalPodAutoscaler | #### KubernetesPatchSpec @@ -2542,6 +2543,8 @@ Note also that, currently, strings containing literal JSON are _rejected_. _Appears in:_ - [KubernetesDaemonSetSpec](#kubernetesdaemonsetspec) - [KubernetesDeploymentSpec](#kubernetesdeploymentspec) +- [KubernetesHorizontalPodAutoscalerSpec](#kuberneteshorizontalpodautoscalerspec) +- [KubernetesPodDisruptionBudgetSpec](#kubernetespoddisruptionbudgetspec) - [KubernetesServiceSpec](#kubernetesservicespec) | Field | Type | Required | Description | @@ -2562,6 +2565,7 @@ _Appears in:_ | Field | Type | Required | Description | | --- | --- | --- | --- | | `minAvailable` | _integer_ | false | MinAvailable specifies the minimum number of pods that must be available at all times during voluntary disruptions,
such as node drains or updates. This setting ensures that your envoy proxy maintains a certain level of availability
and resilience during maintenance operations. | +| `patch` | _[KubernetesPatchSpec](#kubernetespatchspec)_ | false | Patch defines how to perform the patch operation to the PodDisruptionBudget | #### KubernetesPodSpec From 469de2f9180d31fffa4524be45faedbf3469a1fb Mon Sep 17 00:00:00 2001 From: Alex Volchok Date: Sat, 14 Dec 2024 15:02:18 +0100 Subject: [PATCH 12/16] feat: data plane & envoyproxy resilience test suite (#4862) * adding resilience test suite Signed-off-by: Alexander Volchok * fix licensecheck Signed-off-by: Alexander Volchok * fixing lint Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updadting Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok * Update test/resilience/tests/envoygateway.go Co-authored-by: Huabing (Robin) Zhao Signed-off-by: Alex Volchok * updating Signed-off-by: Alexander Volchok * updating Signed-off-by: Alexander Volchok --------- Signed-off-by: Alexander Volchok Signed-off-by: Alex Volchok Co-authored-by: Huabing (Robin) Zhao --- .github/workflows/build_and_test.yaml | 13 + test/resilience/embed.go | 13 + test/resilience/resilience_test.go | 40 +++ test/resilience/suite/flags.go | 14 + test/resilience/suite/suite.go | 115 +++++++ test/resilience/testdata/base.yaml | 110 +++++++ test/resilience/testdata/route_changes.yaml | 16 + test/resilience/tests/envoygateway.go | 253 +++++++++++++++ test/resilience/tests/envoyproxy.go | 92 ++++++ test/resilience/tests/tests.go | 12 + test/utils/kubernetes/kube.go | 322 ++++++++++++++++++++ tools/hack/create-cluster.sh | 74 ++++- tools/make/kube.mk | 8 + 13 files changed, 1076 insertions(+), 6 deletions(-) create mode 100644 test/resilience/embed.go create mode 100644 test/resilience/resilience_test.go create mode 100644 test/resilience/suite/flags.go create mode 100644 test/resilience/suite/suite.go create mode 100644 test/resilience/testdata/base.yaml create mode 100644 test/resilience/testdata/route_changes.yaml create mode 100644 test/resilience/tests/envoygateway.go create mode 100644 test/resilience/tests/envoyproxy.go create mode 100644 test/resilience/tests/tests.go create mode 100644 test/utils/kubernetes/kube.go diff --git 
a/.github/workflows/build_and_test.yaml b/.github/workflows/build_and_test.yaml index 222cfbc42bd..81a08eb76b3 100644 --- a/.github/workflows/build_and_test.yaml +++ b/.github/workflows/build_and_test.yaml @@ -176,6 +176,19 @@ jobs: - name: Read Benchmark report run: cat test/benchmark/benchmark_report/benchmark_report.md + resilience-test: + runs-on: ubuntu-latest + if: ${{ ! startsWith(github.event_name, 'push') }} + needs: [build] + steps: + - uses: actions/checkout@v4.2.2 + - uses: ./tools/github-actions/setup-deps + - name: Resilience Test + env: + KIND_NODE_TAG: v1.28.13 + IMAGE_PULL_POLICY: IfNotPresent + CUSTOM_CNI: "true" + run: make resilience publish: runs-on: ubuntu-latest diff --git a/test/resilience/embed.go b/test/resilience/embed.go new file mode 100644 index 00000000000..8594becb669 --- /dev/null +++ b/test/resilience/embed.go @@ -0,0 +1,13 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +//go:build resilience + +package resilience + +import "embed" + +//go:embed testdata/*.yaml +var Manifests embed.FS diff --git a/test/resilience/resilience_test.go b/test/resilience/resilience_test.go new file mode 100644 index 00000000000..6903860ed6e --- /dev/null +++ b/test/resilience/resilience_test.go @@ -0,0 +1,40 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +//go:build resilience + +package resilience + +import ( + "flag" + "io/fs" + "os" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/gateway-api/conformance/utils/flags" + "testing" + + "github.com/envoyproxy/gateway/test/resilience/suite" + "github.com/envoyproxy/gateway/test/resilience/tests" + kubetest "github.com/envoyproxy/gateway/test/utils/kubernetes" +) + +func TestResilience(t *testing.T) { + cli, _ := kubetest.NewClient(t) + // Parse benchmark options. + flag.Parse() + log.SetLogger(zap.New(zap.WriteTo(os.Stderr), zap.UseDevMode(true))) + bSuite, err := suite.NewResilienceTestSuite( + cli, + *suite.ReportSaveDir, + []fs.FS{Manifests}, + *flags.GatewayClassName, + ) + if err != nil { + t.Fatalf("Failed to create the resillience test suit: %v", err) + } + t.Logf("Running %d resilience tests", len(tests.ResilienceTests)) + bSuite.Run(t, tests.ResilienceTests) +} diff --git a/test/resilience/suite/flags.go b/test/resilience/suite/flags.go new file mode 100644 index 00000000000..1004548570d --- /dev/null +++ b/test/resilience/suite/flags.go @@ -0,0 +1,14 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +//go:build resilience + +package suite + +import "flag" + +var ( + ReportSaveDir = flag.String("report-save-dir", "benchmark_report", "The dir where to save the benchmark test report.") +) diff --git a/test/resilience/suite/suite.go b/test/resilience/suite/suite.go new file mode 100644 index 00000000000..ec60c4f4236 --- /dev/null +++ b/test/resilience/suite/suite.go @@ -0,0 +1,115 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. 
+ +//go:build resilience + +package suite + +import ( + "context" + "github.com/envoyproxy/gateway/test/utils/kubernetes" + "io/fs" + "sigs.k8s.io/gateway-api/conformance/utils/roundtripper" + "testing" + "time" + + opt "github.com/envoyproxy/gateway/internal/cmd/options" + kube "github.com/envoyproxy/gateway/internal/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/gateway-api/conformance/utils/config" +) + +const ( + BenchmarkTestScaledKey = "benchmark-test/scaled" + BenchmarkTestClientKey = "benchmark-test/client" + DefaultControllerName = "gateway.envoyproxy.io/gatewayclass-controller" +) + +type ResilienceTest struct { + ShortName string + Description string + Test func(*testing.T, *ResilienceTestSuite) +} + +type ResilienceTestSuite struct { + Client client.Client + TimeoutConfig config.TimeoutConfig + ControllerName string + ReportSaveDir string + KubeActions *kubernetes.KubeActions + // Labels + scaledLabels map[string]string // indicate which resources are scaled + + // Clients that for internal usage. + kubeClient kube.CLIClient // required for getting logs from pod\ + ManifestFS []fs.FS + GatewayClassName string + RoundTripper roundtripper.RoundTripper +} + +func NewResilienceTestSuite(client client.Client, reportDir string, manifestFS []fs.FS, gcn string) (*ResilienceTestSuite, error) { + var ( + timeoutConfig = config.TimeoutConfig{} + ) + + // Reset some timeout config for the benchmark test. + config.SetupTimeoutConfig(&timeoutConfig) + timeoutConfig.RouteMustHaveParents = 180 * time.Second + roundTripper := &roundtripper.DefaultRoundTripper{Debug: true, TimeoutConfig: timeoutConfig} + // Initial various client. + kubeClient, err := kube.NewCLIClient(opt.DefaultConfigFlags.ToRawKubeConfigLoader()) + if err != nil { + return nil, err + } + KubeActions := kubernetes.NewKubeHelper(client, kubeClient) + return &ResilienceTestSuite{ + Client: client, + ManifestFS: manifestFS, + TimeoutConfig: timeoutConfig, + ControllerName: DefaultControllerName, + ReportSaveDir: reportDir, + GatewayClassName: gcn, + scaledLabels: map[string]string{ + BenchmarkTestScaledKey: "true", + }, + KubeActions: KubeActions, + kubeClient: kubeClient, + RoundTripper: roundTripper, + }, nil +} + +func (rts *ResilienceTestSuite) WithResCleanUp(ctx context.Context, t *testing.T, f func() (client.Object, error)) error { + res, err := f() + t.Cleanup(func() { + t.Logf("Start to cleanup resilsence test resources") + if deleteErr := rts.Client.Delete(ctx, res); deleteErr != nil { + } + + t.Logf("Clean up complete!") + }) + return err +} + +func (rts *ResilienceTestSuite) Kube() *kubernetes.KubeActions { + return rts.KubeActions +} + +func (rts *ResilienceTestSuite) Run(t *testing.T, tests []ResilienceTest) { + t.Logf("Running %d resilience tests", len(tests)) + for _, test := range tests { + t.Logf("Running resilience test: %s", test.ShortName) + test.Test(t, rts) + } +} + +func (rts *ResilienceTestSuite) RegisterCleanup(t *testing.T, ctx context.Context, object client.Object) { + t.Cleanup(func() { + t.Logf("Start to cleanup resilsence test resources") + if deleteErr := rts.Client.Delete(ctx, object); deleteErr != nil { + } + + t.Logf("Clean up complete!") + }) +} diff --git a/test/resilience/testdata/base.yaml b/test/resilience/testdata/base.yaml new file mode 100644 index 00000000000..1be77ba9602 --- /dev/null +++ b/test/resilience/testdata/base.yaml @@ -0,0 +1,110 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: gateway-resilience +--- +apiVersion: gateway.networking.k8s.io/v1 
+kind: GatewayClass +metadata: + namespace: gateway-resilience + name: envoy-gateway +spec: + controllerName: gateway.envoyproxy.io/gatewayclass-controller + parametersRef: + group: gateway.envoyproxy.io + kind: EnvoyProxy + name: custom-proxy-config + namespace: gateway-resilience +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: all-namespaces + namespace: gateway-resilience +spec: + gatewayClassName: "{GATEWAY_CLASS_NAME}" + listeners: + - name: http + port: 80 + protocol: HTTP + allowedRoutes: + namespaces: + from: All +--- +apiVersion: v1 +kind: Service +metadata: + name: backend + namespace: gateway-resilience +spec: + selector: + app: backend + ports: + - protocol: TCP + port: 8080 + name: http11 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend + namespace: gateway-resilience + labels: + app: backend +spec: + replicas: 2 + selector: + matchLabels: + app: backend + template: + metadata: + labels: + app: backend + spec: + containers: + - name: backend + # From https://github.com/kubernetes-sigs/gateway-api/blob/main/conformance/echo-basic/echo-basic.go + image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SERVICE_NAME + value: backend + resources: + requests: + cpu: 10m +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: backend + namespace: gateway-resilience +spec: + parentRefs: + - name: all-namespaces + rules: + - matches: + - path: + type: PathPrefix + value: /welcome + backendRefs: + - name: backend + port: 8080 +--- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: EnvoyProxy +metadata: + name: custom-proxy-config + namespace: gateway-resilience +spec: + provider: + type: Kubernetes + kubernetes: + diff --git a/test/resilience/testdata/route_changes.yaml b/test/resilience/testdata/route_changes.yaml new file mode 100644 index 00000000000..2c8d98c0e8c --- /dev/null +++ b/test/resilience/testdata/route_changes.yaml @@ -0,0 +1,16 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: backend + namespace: gateway-resilience +spec: + parentRefs: + - name: all-namespaces + rules: + - matches: + - path: + type: PathPrefix + value: /route-change + backendRefs: + - name: backend + port: 8080 diff --git a/test/resilience/tests/envoygateway.go b/test/resilience/tests/envoygateway.go new file mode 100644 index 00000000000..1d8b8787879 --- /dev/null +++ b/test/resilience/tests/envoygateway.go @@ -0,0 +1,253 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. 
+ +//go:build resilience + +package tests + +import ( + "context" + "github.com/envoyproxy/gateway/test/resilience/suite" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/gateway-api/conformance/utils/http" + "sigs.k8s.io/gateway-api/conformance/utils/kubernetes" + "sigs.k8s.io/gateway-api/conformance/utils/tlog" + "testing" + "time" +) + +const ( + namespace = "envoy-gateway-system" + envoygateway = "envoy-gateway" + targetString = "successfully acquired lease" + apiServerIP = "10.96.0.1" + timeout = 2 * time.Minute + policyName = "egress-rules" + leaseName = "5b9825d2.gateway.envoyproxy.io" + trashHold = 2 +) + +func init() { + ResilienceTests = append(ResilienceTests, EGResilience) +} + +var EGResilience = suite.ResilienceTest{ + ShortName: "EGResilience", + Description: "Envoygateway resilience test", + Test: func(t *testing.T, suite *suite.ResilienceTestSuite) { + ap := kubernetes.Applier{ + ManifestFS: suite.ManifestFS, + GatewayClass: suite.GatewayClassName, + ControllerName: "gateway.envoyproxy.io/gatewayclass-controller", + } + ap.MustApplyWithCleanup(t, suite.Client, suite.TimeoutConfig, "testdata/base.yaml", true) + + //this test will fail until https://github.com/envoyproxy/gateway/pull/4767/files is merged + t.Run("Secondary EnvoyGateway instances can serve an up to date xDS", func(t *testing.T) { + ctx := context.Background() + t.Log("Scaling down the deployment to 0 replicas") + err := suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 0, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment replicas") + + t.Log("Scaling up the deployment to 3 replicas") + err = suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 3, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment replicas") + + t.Log("Waiting for leader election") + // Ensure leadership was taken + name, err := suite.Kube().GetElectedLeader(context.Background(), namespace, leaseName, metav1.Now(), time.Minute*2) + require.NoError(t, err, "unable to detect leader election") + + t.Log("Marking the identified pod as leader") + suite.Kube().MarkAsLeader(namespace, name) + + // Pods rely on connectivity to the API server to participate in leader election processes. + // Without this connectivity, they cannot become leaders, in this test we won't bring it back. + // The secondary pods will continue to operate using their last known good configuration (xDS) + // and share it with envoy proxies accordingly. + t.Log("Simulating API server connection failure for all pods") + err = suite.WithResCleanUp(ctx, t, func() (client.Object, error) { + return suite.Kube().ManageEgress(ctx, apiServerIP, namespace, policyName, true, map[string]string{"app.kubernetes.io/name": "gateway-helm"}) + }) + require.NoError(t, err, "Failed to simulate API server connection failure") + + // The leader pod should go down, the standby pods remain + // When a leader pod loses connectivity to the API server, Kubernetes does not immediately terminate or stop the pod. + // Instead, the pod itself detects the loss of connectivity, initiates a graceful teardown process, and restarts to attempt + // to reconnect to the API server. + // The replica count for the deployment remains at 3 throughout the process. 
+ // Kubernetes does not schedule a new pod to replace the one that lost connectivity because the existing pod is not + // considered failed from Kubernetes’ perspective. It’s the responsibility of the application running inside the + // pod (e.g., the leader election logic) to handle reconnection attempts or restart itself. + t.Log("Verifying deployment scales down to 2 replica") + err = suite.Kube().CheckDeploymentReplicas(ctx, envoygateway, namespace, 2, time.Minute) + require.NoError(t, err, "Deployment did not scale down") + + ns := "gateway-resilience" + routeNN := types.NamespacedName{Name: "backend", Namespace: ns} + gwNN := types.NamespacedName{Name: "all-namespaces", Namespace: ns} + gwAddr := kubernetes.GatewayAndHTTPRoutesMustBeAccepted(t, suite.Client, suite.TimeoutConfig, suite.ControllerName, kubernetes.NewGatewayRef(gwNN), routeNN) + + expectedResponse := http.ExpectedResponse{ + Request: http.Request{ + Path: "/welcome", + }, + Response: http.Response{ + StatusCode: 200, + }, + Namespace: ns, + } + + req := http.MakeRequest(t, &expectedResponse, gwAddr, "http", "http") + http.AwaitConvergence(t, trashHold, timeout, func(elapsed time.Duration) bool { + cReq, cRes, err := suite.RoundTripper.CaptureRoundTrip(req) + if err != nil { + tlog.Logf(t, "Request failed, not ready yet: %v (after %v)", err.Error(), elapsed) + return false + } + + if err := http.CompareRequest(t, &req, cReq, cRes, expectedResponse); err != nil { + tlog.Logf(t, "Response expectation failed for request: %+v not ready yet: %v (after %v)", req, err, elapsed) + return false + } + return true + }) + }) + + t.Run("EnvoyGateway reconciles missed resources and sync xDS after api server connectivity is restored", func(t *testing.T) { + err := suite.Kube().ScaleDeploymentAndWait(context.Background(), envoygateway, namespace, 0, timeout, false) + require.NoError(t, err, "Failed to scale deployment") + err = suite.Kube().ScaleDeploymentAndWait(context.Background(), envoygateway, namespace, 1, timeout, false) + require.NoError(t, err, "Failed to scale deployment") + + // Ensure leadership was taken + _, err = suite.Kube().GetElectedLeader(context.Background(), namespace, leaseName, metav1.Now(), timeout) + require.NoError(t, err, "unable to detect leader election") + + t.Log("Simulating API server down for all pods") + err = suite.WithResCleanUp(context.Background(), t, func() (client.Object, error) { + return suite.Kube().ManageEgress(context.Background(), apiServerIP, namespace, policyName, true, map[string]string{}) + }) + require.NoError(t, err, "unable to block api server connectivity") + + ap.MustApplyWithCleanup(t, suite.Client, suite.TimeoutConfig, "testdata/route_changes.yaml", true) + t.Log("backend routes changed") + + t.Log("restore API server connectivity") + _, err = suite.Kube().ManageEgress(context.Background(), apiServerIP, namespace, policyName, false, map[string]string{}) + require.NoError(t, err, "unable to unblock api server connectivity") + + err = suite.Kube().WaitForDeploymentReplicaCount(context.Background(), envoygateway, namespace, 1, time.Minute, false) + require.NoError(t, err, "Failed to ensure that pod is online") + _, err = suite.Kube().GetElectedLeader(context.Background(), namespace, leaseName, metav1.Now(), time.Minute*2) + require.NoError(t, err, "unable to detect leader election") + t.Log("eg is online") + ns := "gateway-resilience" + routeNN := types.NamespacedName{Name: "backend", Namespace: ns} + gwNN := types.NamespacedName{Name: "all-namespaces", Namespace: ns} + gwAddr := 
kubernetes.GatewayAndHTTPRoutesMustBeAccepted(t, suite.Client, suite.TimeoutConfig, suite.ControllerName, kubernetes.NewGatewayRef(gwNN), routeNN) + + expectedResponse := http.ExpectedResponse{ + Request: http.Request{ + Path: "/route-change", + }, + Response: http.Response{ + StatusCode: 200, + }, + Namespace: ns, + } + + req := http.MakeRequest(t, &expectedResponse, gwAddr, "http", "http") + http.AwaitConvergence(t, trashHold, time.Minute, func(elapsed time.Duration) bool { + cReq, cRes, err := suite.RoundTripper.CaptureRoundTrip(req) + if err != nil { + tlog.Logf(t, "Request failed, not ready yet: %v (after %v)", err.Error(), elapsed) + return false + } + + if err := http.CompareRequest(t, &req, cReq, cRes, expectedResponse); err != nil { + tlog.Logf(t, "Response expectation failed for request: %+v not ready yet: %v (after %v)", req, err, elapsed) + return false + } + return true + }) + + require.NoError(t, err, "Failed during connectivity checkup") + }) + + t.Run("Leader election transitions when leader loses API server connection", func(t *testing.T) { + ctx := context.Background() + t.Log("Scaling down the deployment to 0 replicas") + err := suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 0, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment replicas") + + t.Log("Scaling up the deployment to 2 replicas") + err = suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 2, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment replicas") + + t.Log("Waiting for leader election") + // Ensure leadership was taken + name, err := suite.Kube().GetElectedLeader(context.Background(), namespace, leaseName, metav1.Now(), time.Minute*2) + require.NoError(t, err, "unable to detect leader election") + + t.Log("Marking the identified pod as leader") + suite.Kube().MarkAsLeader(namespace, name) + + t.Log("Simulating API server connection failure for the leader") + err = suite.WithResCleanUp(ctx, t, func() (client.Object, error) { + return suite.Kube().ManageEgress(ctx, apiServerIP, namespace, policyName, true, map[string]string{ + "leader": "true", + }) + }) + require.NoError(t, err, "Failed to simulate API server connection failure") + + // leader pod should go down, the standby remain + t.Log("Verifying deployment scales down to 1 replicas") + err = suite.Kube().CheckDeploymentReplicas(ctx, envoygateway, namespace, 1, time.Minute) + require.NoError(t, err, "Deployment did not scale down") + + // Ensure leadership was taken + newLeader, err := suite.Kube().GetElectedLeader(context.Background(), namespace, leaseName, metav1.Now(), time.Minute*2) + require.NoError(t, err, "unable to detect leader election") + require.NotEqual(t, newLeader, name, "new leader name should not be equal to the first leader") + ap.MustApplyWithCleanup(t, suite.Client, suite.TimeoutConfig, "testdata/route_changes.yaml", true) + t.Log("backend routes changed") + + ns := "gateway-resilience" + routeNN := types.NamespacedName{Name: "backend", Namespace: ns} + gwNN := types.NamespacedName{Name: "all-namespaces", Namespace: ns} + gwAddr := kubernetes.GatewayAndHTTPRoutesMustBeAccepted(t, suite.Client, suite.TimeoutConfig, suite.ControllerName, kubernetes.NewGatewayRef(gwNN), routeNN) + + expectedResponse := http.ExpectedResponse{ + Request: http.Request{ + Path: "/route-change", + }, + Response: http.Response{ + StatusCode: 200, + }, + Namespace: ns, + } + + req := http.MakeRequest(t, &expectedResponse, gwAddr, "http", "http") + + http.AwaitConvergence(t, 
trashHold, timeout, func(elapsed time.Duration) bool { + cReq, cRes, err := suite.RoundTripper.CaptureRoundTrip(req) + if err != nil { + tlog.Logf(t, "Request failed, not ready yet: %v (after %v)", err.Error(), elapsed) + return false + } + + if err := http.CompareRequest(t, &req, cReq, cRes, expectedResponse); err != nil { + tlog.Logf(t, "Response expectation failed for request: %+v not ready yet: %v (after %v)", req, err, elapsed) + return false + } + return true + }) + }) + }, +} diff --git a/test/resilience/tests/envoyproxy.go b/test/resilience/tests/envoyproxy.go new file mode 100644 index 00000000000..492e4530c4a --- /dev/null +++ b/test/resilience/tests/envoyproxy.go @@ -0,0 +1,92 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +//go:build resilience + +package tests + +import ( + "context" + "github.com/envoyproxy/gateway/test/resilience/suite" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/gateway-api/conformance/utils/http" + "sigs.k8s.io/gateway-api/conformance/utils/kubernetes" + "sigs.k8s.io/gateway-api/conformance/utils/tlog" + "testing" + "time" +) + +func init() { + ResilienceTests = append(ResilienceTests, EPResilience) +} + +var EPResilience = suite.ResilienceTest{ + ShortName: "EPResilience", + Description: "Envoyproxy resilience test", + Test: func(t *testing.T, suite *suite.ResilienceTestSuite) { + var () + + ap := kubernetes.Applier{ + ManifestFS: suite.ManifestFS, + GatewayClass: suite.GatewayClassName, + ControllerName: "gateway.envoyproxy.io/gatewayclass-controller", + } + + ap.MustApplyWithCleanup(t, suite.Client, suite.TimeoutConfig, "testdata/base.yaml", true) + + t.Run("Envoy proxies continue to work even when eg is offline", func(t *testing.T) { + ctx := context.Background() + + t.Log("Scaling down the deployment to 2 replicas") + err := suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 2, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment replicas") + + t.Log("ensure envoy proxy is running") + err = suite.Kube().CheckDeploymentReplicas(ctx, envoygateway, namespace, 2, time.Minute) + require.NoError(t, err, "Failed to check deployment replicas") + + t.Log("Scaling down the deployment to 0 replicas") + err = suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 0, time.Minute, false) + require.NoError(t, err, "Failed to scale deployment to replicas") + + t.Cleanup(func() { + err := suite.Kube().ScaleDeploymentAndWait(ctx, envoygateway, namespace, 1, time.Minute, false) + require.NoError(t, err, "Failed to restore replica count.") + }) + + require.NoError(t, err, "failed to add cleanup") + + ns := "gateway-resilience" + routeNN := types.NamespacedName{Name: "backend", Namespace: ns} + gwNN := types.NamespacedName{Name: "all-namespaces", Namespace: ns} + gwAddr := kubernetes.GatewayAndHTTPRoutesMustBeAccepted(t, suite.Client, suite.TimeoutConfig, suite.ControllerName, kubernetes.NewGatewayRef(gwNN), routeNN) + + expectedResponse := http.ExpectedResponse{ + Request: http.Request{ + Path: "/welcome", + }, + Response: http.Response{ + StatusCode: 200, + }, + Namespace: ns, + } + + req := http.MakeRequest(t, &expectedResponse, gwAddr, "http", "http") + http.AwaitConvergence(t, trashHold, timeout, func(elapsed time.Duration) bool { + cReq, cRes, err := suite.RoundTripper.CaptureRoundTrip(req) + if err != nil { + tlog.Logf(t, "Request failed, 
not ready yet: %v (after %v)", err.Error(), elapsed) + return false + } + if err := http.CompareRequest(t, &req, cReq, cRes, expectedResponse); err != nil { + tlog.Logf(t, "Response expectation failed for request: %+v not ready yet: %v (after %v)", req, err, elapsed) + return false + } + return true + }) + }) + }, +} diff --git a/test/resilience/tests/tests.go b/test/resilience/tests/tests.go new file mode 100644 index 00000000000..54a2d7bbde8 --- /dev/null +++ b/test/resilience/tests/tests.go @@ -0,0 +1,12 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +//go:build resilience + +package tests + +import "github.com/envoyproxy/gateway/test/resilience/suite" + +var ResilienceTests []suite.ResilienceTest diff --git a/test/utils/kubernetes/kube.go b/test/utils/kubernetes/kube.go new file mode 100644 index 00000000000..0660bfbc99e --- /dev/null +++ b/test/utils/kubernetes/kube.go @@ -0,0 +1,322 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + coordinationv1 "k8s.io/api/coordination/v1" + networkingv1 "k8s.io/api/networking/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + egv1a1 "github.com/envoyproxy/gateway/api/v1alpha1" + kube "github.com/envoyproxy/gateway/internal/kubernetes" +) + +// NewKubeHelper consolidates common Kubernetes operations, including deployments, traffic management, and log probing. 
+func NewKubeHelper(client client.Client, kubeClient kube.CLIClient) *KubeActions { + return &KubeActions{ + Client: client, + CLIClient: kubeClient, + } +} + +type KubeActions struct { + client.Client + kube.CLIClient +} + +func (ka *KubeActions) ManageEgress(ctx context.Context, ip, namespace, policyName string, blockTraffic bool, scope map[string]string) (*networkingv1.NetworkPolicy, error) { + // Retrieve the existing NetworkPolicy, if it exists + existingPolicy := &networkingv1.NetworkPolicy{} + err := ka.Get(ctx, client.ObjectKey{Name: policyName, Namespace: namespace}, existingPolicy) + if err != nil && !kerrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to get existing NetworkPolicy: %w", err) + } + + // Define the Egress rule based on the enforce parameter + egressRule := networkingv1.NetworkPolicyEgressRule{ + To: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + Except: []string{ + ip + "/32", + }, + }, + }, + }, + } + // Define the NetworkPolicy object + netPolicy := &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + Namespace: namespace, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: scope, + }, // Selects all pods in the namespace + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeEgress, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + egressRule, + }, + }, + } + + // remove the policy + if !blockTraffic { + if err := ka.Client.Delete(ctx, netPolicy); err != nil { + return nil, fmt.Errorf("failed to delete NetworkPolicy: %w", err) + } + return nil, nil + } + + if kerrors.IsNotFound(err) { + // Create the NetworkPolicy if it doesn't exist + if err := ka.Client.Create(ctx, netPolicy); err != nil { + return nil, fmt.Errorf("failed to create NetworkPolicy: %w", err) + } + fmt.Printf("NetworkPolicy %s created.\n", netPolicy.Name) + } else { + // Update the existing NetworkPolicy + existingPolicy.Spec = netPolicy.Spec + if err := ka.Client.Update(ctx, existingPolicy); err != nil { + return nil, fmt.Errorf("failed to update NetworkPolicy: %w", err) + } + fmt.Printf("NetworkPolicy %s updated.\n", netPolicy.Name) + } + + return netPolicy, nil +} + +func (ka *KubeActions) ScaleDeploymentAndWait(ctx context.Context, deploymentName, namespace string, replicas int32, timeout time.Duration, prefix bool) error { + // Get the current deployment + deployment := &appsv1.Deployment{} + if prefix { + var err error + deployment, err = ka.getDepByPrefix(ctx, deploymentName, namespace) + if err != nil { + return err + } + } else { + err := ka.Client.Get(ctx, client.ObjectKey{Name: deploymentName, Namespace: namespace}, deployment) + if err != nil { + return err + } + } + + // Update the replicas count + deployment.Spec.Replicas = &replicas + + // Apply the update + err := ka.Client.Update(ctx, deployment) + if err != nil { + return err + } + + fmt.Printf("Deployment %s scaled to %d replicas\n", deployment.Name, replicas) + return ka.WaitForDeploymentReplicaCount(ctx, deployment.Name, namespace, replicas, timeout, false) +} + +func (ka *KubeActions) ScaleEnvoyProxy(envoyProxyName, namespace string, replicas int32) error { + ctx := context.Background() + + // Retrieve the existing EnvoyProxy resource + envoyProxy := &egv1a1.EnvoyProxy{} + err := ka.Client.Get(ctx, types.NamespacedName{Name: envoyProxyName, Namespace: namespace}, envoyProxy) + if err != nil { + return fmt.Errorf("failed to get EnvoyProxy: %w", err) + } + 
envoyProxy.Spec.Provider.Kubernetes = &egv1a1.EnvoyProxyKubernetesProvider{ + EnvoyDeployment: &egv1a1.KubernetesDeploymentSpec{ + Replicas: ptr.To[int32](replicas), + }, + } + + // Update the replicas count + envoyProxy.Spec.Provider.Kubernetes.EnvoyDeployment.Replicas = &replicas + + // Apply the update + err = ka.Client.Update(ctx, envoyProxy) + if err != nil { + return fmt.Errorf("failed to update EnvoyProxy: %w", err) + } + + return nil +} + +func (ka *KubeActions) MarkAsLeader(namespace, podName string) { + pod, err := ka.Kube().CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + + // Initialize the labels map if it's nil + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + + // Add or update the desired label + pod.Labels["leader"] = "true" + + // Update the Pod with the new label + updatedPod, err := ka.Kube().CoreV1().Pods(namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) + if err != nil { + panic(err.Error()) + } + + fmt.Printf("Pod %s updated with new label.\n", updatedPod.Name) +} + +func (ka *KubeActions) WaitForDeploymentReplicaCount(ctx context.Context, deploymentName, namespace string, replicas int32, timeout time.Duration, prefix bool) error { + start := time.Now() + + for { + // Check if the timeout has been reached + if time.Since(start) > timeout { + return errors.New("timeout reached waiting for deployment to scale") + } + + // Get the current deployment status + deployment := &appsv1.Deployment{} + + if prefix { + var err error + deployment, err = ka.getDepByPrefix(ctx, deploymentName, namespace) + if err != nil { + return err + } + } else { + err := ka.Get(ctx, client.ObjectKey{Name: deploymentName, Namespace: namespace}, deployment) + if err != nil { + return err + } + } + + // Check if the deployment has reached the desired number of replicas + if deployment.Status.ReadyReplicas == replicas { + fmt.Printf("Deployment %s scaled to %d replicas\n", deploymentName, replicas) + return nil + } + + // Wait before checking again + time.Sleep(5 * time.Second) + } +} + +func (ka *KubeActions) CheckDeploymentReplicas(ctx context.Context, prefix, namespace string, expectedReplicas int, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + deployment, err := ka.getDepByPrefix(ctx, prefix, namespace) + if err != nil { + return err + } + + if deployment != nil { + // Wait for the deployment to reach the expected replica count + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout reached: deployment %q did not reach %d replicas", deployment.Name, expectedReplicas) + default: + // Fetch the current status of the deployment + deployment, err := ka.Kube().AppsV1().Deployments(namespace).Get(ctx, deployment.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %q: %w", deployment.Name, err) + } + + // Check the ready replica count + if int(deployment.Status.ReadyReplicas) == expectedReplicas { + fmt.Printf("Deployment %q reached %d replicas as expected.\n", deployment.Name, expectedReplicas) + return nil + } + + fmt.Printf("Waiting for deployment %q: ready replicas %d/%d\n", + deployment.Name, deployment.Status.ReadyReplicas, expectedReplicas) + time.Sleep(1 * time.Second) // Retry interval + } + } + } + return errors.New("deployment was not found") +} + +func (ka *KubeActions) getDepByPrefix(ctx context.Context, prefix string, namespace string) (*appsv1.Deployment, error) { + deployments, 
err := ka.Kube().AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list deployments: %w", err) + } + + // Search for the deployment with the specified prefix + for _, dep := range deployments.Items { + if len(dep.Name) >= len(prefix) && dep.Name[:len(prefix)] == prefix { + return &dep, nil + } + } + return nil, errors.New("deployment not found") +} + +func (ka *KubeActions) GetElectedLeader(ctx context.Context, namespace, leaseName string, afterTime metav1.Time, timeout time.Duration) (string, error) { + // Create a context with a timeout + ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + // Fetch the Lease object + lease, err := ka.getLease(ctxWithTimeout, namespace, leaseName) + if err != nil { + return "", fmt.Errorf("failed to get lease %s in namespace %s: %w", leaseName, namespace, err) + } + + // Check if RenewTime matches the condition + if lease.Spec.RenewTime != nil && lease.Spec.RenewTime.After(afterTime.Time) { + if lease.Spec.HolderIdentity == nil || *lease.Spec.HolderIdentity == "" { + return "", fmt.Errorf("lease %s does not have a valid holderIdentity", leaseName) + } + + // Return the leader pod name + hi := *lease.Spec.HolderIdentity + parts := strings.SplitN(hi, "_", 2) + + // Return the left part (pod name) + if len(parts) > 0 { + return parts[0], nil + } else { + return "", fmt.Errorf("lease %s does not have a valid holderIdentity", leaseName) + } + } + + // Sleep for a short interval before retrying to avoid excessive API calls + select { + case <-ctxWithTimeout.Done(): + return "", fmt.Errorf("timeout reached while waiting for lease renew time: %w", ctxWithTimeout.Err()) + case <-time.After(1 * time.Second): + // Retry after a delay + } + } +} + +func (ka *KubeActions) getLease(ctx context.Context, namespace, leaseName string) (*coordinationv1.Lease, error) { + // Fetch the Lease object + lease, err := ka.Kube().CoordinationV1().Leases(namespace).Get(ctx, leaseName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get lease %s in namespace %s: %w", leaseName, namespace, err) + } + + return lease, nil +} diff --git a/tools/hack/create-cluster.sh b/tools/hack/create-cluster.sh index c779aa0d6f3..ffb2408a669 100755 --- a/tools/hack/create-cluster.sh +++ b/tools/hack/create-cluster.sh @@ -8,11 +8,19 @@ METALLB_VERSION=${METALLB_VERSION:-"v0.13.10"} KIND_NODE_TAG=${KIND_NODE_TAG:-"v1.32.0"} NUM_WORKERS=${NUM_WORKERS:-""} IP_FAMILY=${IP_FAMILY:-"ipv4"} +CUSTOM_CNI=${CUSTOM_CNI:-"false"} + +if [ "$CUSTOM_CNI" = "true" ]; then + CNI_CONFIG="disableDefaultCNI: true" +else + CNI_CONFIG="disableDefaultCNI: false" +fi KIND_CFG=$(cat <<-EOM kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 networking: + ${CNI_CONFIG} ipFamily: ${IP_FAMILY} # it's to prevent inherit search domains from the host which slows down DNS resolution # and cause problems to IPv6 only clusters running on IPv4 host. 
@@ -44,7 +52,38 @@ ${KIND_CFG} EOF fi fi - +if [ "$CUSTOM_CNI" = "true" ]; then +## Install Calico +# Determine the operating system +OS=$(uname -s) +case $OS in + Darwin) + CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt) + CLI_ARCH=amd64 + if [ "$(uname -m)" = "arm64" ]; then CLI_ARCH=arm64; fi + curl -L --fail --remote-name-all "https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-darwin-${CLI_ARCH}.tar.gz"{,.sha256sum} + shasum -a 256 -c cilium-darwin-${CLI_ARCH}.tar.gz.sha256sum + tar xf cilium-darwin-${CLI_ARCH}.tar.gz + rm cilium-darwin-${CLI_ARCH}.tar.gz{,.sha256sum} + ;; + Linux) + CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt) + CLI_ARCH=amd64 + if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi + curl -L --fail --remote-name-all "https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz"{,.sha256sum} + sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum + tar xf cilium-linux-${CLI_ARCH}.tar.gz + rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} + ;; + *) + echo "Unsupported operating system: $OS" + exit 1 + ;; +esac +mkdir -p bin +chmod +x cilium +mv cilium bin +fi ## Install MetalLB. kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/"${METALLB_VERSION}"/config/manifests/metallb-native.yaml @@ -53,9 +92,6 @@ if [ -z "$needCreate" ]; then kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" fi -# Wait for MetalLB to become available. -kubectl rollout status -n metallb-system deployment/controller --timeout 5m -kubectl rollout status -n metallb-system daemonset/speaker --timeout 5m # Apply config with addresses based on docker network IPAM. address_ranges="" @@ -82,8 +118,8 @@ if [ -z "${address_ranges}" ]; then exit 1 fi -# Apply MetalLB IPAddressPool and L2Advertisement -kubectl apply -f - </dev/null 2>&1 apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: @@ -102,3 +138,29 @@ spec: ipAddressPools: - kube-services EOF +} + +RETRY_INTERVAL=5 # seconds +TIMEOUT=120 # seconds +ELAPSED_TIME=0 + +if [ "$CUSTOM_CNI" = "true" ]; then + CILIUM_BIN="./bin/cilium" + $CILIUM_BIN install --wait --version 1.16.4 + $CILIUM_BIN status --wait +fi + +# Apply MetalLB IPAddressPool and L2Advertisement +echo "Applying configuration with retries..." + # Retry loop + while [ $ELAPSED_TIME -lt $TIMEOUT ]; do + if apply_metallb_ranges; then + echo "Configuration applied successfully." + exit 0 + else + echo "Trying to apply configuration. Retrying in $RETRY_INTERVAL seconds..." + fi + sleep $RETRY_INTERVAL + ELAPSED_TIME=$((ELAPSED_TIME + RETRY_INTERVAL)) + done + diff --git a/tools/make/kube.mk b/tools/make/kube.mk index ecb458e2791..ad9f04efc52 100644 --- a/tools/make/kube.mk +++ b/tools/make/kube.mk @@ -145,6 +145,9 @@ experimental-conformance: create-cluster kube-install-image kube-deploy run-expe .PHONY: benchmark benchmark: create-cluster kube-install-image kube-deploy-for-benchmark-test run-benchmark delete-cluster ## Create a kind cluster, deploy EG into it, run Envoy Gateway benchmark test, and clean up. +.PHONY: resilience +resilience: create-cluster kube-install-image kube-deploy run-resilience delete-cluster ## Create a kind cluster, deploy EG into it, run Envoy Gateway resilience test, and clean up. 
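+# Note: the run-resilience target defined below can also be invoked on its own against an existing cluster.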
+ .PHONY: e2e e2e: create-cluster kube-install-image kube-deploy \ install-ratelimit install-eg-addons kube-install-examples-image \ @@ -177,6 +180,11 @@ else --run-test $(E2E_RUN_TEST) endif +.PHONY: run-resilience +run-resilience: ## Run resilience tests + @$(LOG_TARGET) + go test -v -tags resilience ./test/resilience --gateway-class=envoy-gateway + .PHONY: run-benchmark run-benchmark: install-benchmark-server prepare-ip-family ## Run benchmark tests @$(LOG_TARGET) From 67bf63c61076e02342690ae841fada432558c8f7 Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Mon, 16 Dec 2024 07:42:43 -0800 Subject: [PATCH 13/16] docs: Set GA4 ID (#4919) * Use the same as the envoy proxy and envoy mobile websites Relates to https://github.com/envoyproxy/envoy-website/issues/273 & https://github.com/envoyproxy/envoy-mobile/issues/2680 Signed-off-by: Arko Dasgupta --- site/hugo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/hugo.toml b/site/hugo.toml index 6fabd7aa245..822ef19d0f5 100644 --- a/site/hugo.toml +++ b/site/hugo.toml @@ -53,7 +53,7 @@ anchor = "smart" [services] [services.googleAnalytics] # Comment out the next line to disable GA tracking. Also disables the feature described in [params.ui.feedback]. -id = "UA-00000000-0" +id = "G-DXJEH1ZRXX" # Language configuration From a9f6cd2b414e9f2c25012fb251c88088e745d1cb Mon Sep 17 00:00:00 2001 From: "Huabing (Robin) Zhao" Date: Tue, 17 Dec 2024 11:01:16 +0800 Subject: [PATCH 14/16] chore: set go version for the osv scanner (#4941) set go version for osv scanner Signed-off-by: Huabing Zhao --- .github/workflows/osv-scanner.yml | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/.github/workflows/osv-scanner.yml b/.github/workflows/osv-scanner.yml index e43942d85c7..90dfcbfa9e6 100644 --- a/.github/workflows/osv-scanner.yml +++ b/.github/workflows/osv-scanner.yml @@ -19,27 +19,34 @@ permissions: jobs: scan-scheduled: if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }} - uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@19ec1116569a47416e11a45848722b1af31a857b" # v1.9.0 + runs-on: ubuntu-latest + steps: + - uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@19ec1116569a47416e11a45848722b1af31a857b" # v1.9.0 + with: + scan-args: |- + --skip-git + --recursive + ./ permissions: actions: read contents: read # Require writing security events to upload SARIF file to security tab security-events: write - with: - scan-args: |- - --skip-git - --recursive - ./ scan-pr: if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} - uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@19ec1116569a47416e11a45848722b1af31a857b" # v1.9.0 + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v5 + with: + go-version: '1.23.4' # The Go version to download (if necessary) and use. 
+ - uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@19ec1116569a47416e11a45848722b1af31a857b" # v1.9.0 + with: + scan-args: |- + --skip-git + --recursive + ./ permissions: actions: read contents: read security-events: write - with: - scan-args: |- - --skip-git - --recursive - ./ From f2641b3ab3ac507e0b34bcea7762c6a85309304a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:17:59 +0800 Subject: [PATCH 15/16] build(deps): bump softprops/action-gh-release from 2.1.0 to 2.2.0 (#4922) Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.1.0 to 2.2.0. - [Release notes](https://github.com/softprops/action-gh-release/releases) - [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md) - [Commits](https://github.com/softprops/action-gh-release/compare/01570a1f39cb168c169c802c3bceb9e93fb10974...7b4da11513bf3f43f9999e90eabced41ab8bb048) --- updated-dependencies: - dependency-name: softprops/action-gh-release dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: zirain --- .github/workflows/latest_release.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/latest_release.yaml b/.github/workflows/latest_release.yaml index 47de6b9af60..27b7cf70670 100644 --- a/.github/workflows/latest_release.yaml +++ b/.github/workflows/latest_release.yaml @@ -107,7 +107,7 @@ jobs: GITHUB_REPOSITORY: ${{ github.repository_owner }}/${{ github.event.repository.name }} - name: Recreate the Latest Release and Tag - uses: softprops/action-gh-release@01570a1f39cb168c169c802c3bceb9e93fb10974 # v0.1.15 + uses: softprops/action-gh-release@7b4da11513bf3f43f9999e90eabced41ab8bb048 # v0.1.15 with: draft: false prerelease: true diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 47636612dbf..5481116ae48 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -96,7 +96,7 @@ jobs: tar -zcvf egctl_${{ env.release_tag }}_darwin_arm64.tar.gz bin/darwin/arm64/egctl - name: Upload Release Manifests - uses: softprops/action-gh-release@01570a1f39cb168c169c802c3bceb9e93fb10974 # v0.1.15 + uses: softprops/action-gh-release@7b4da11513bf3f43f9999e90eabced41ab8bb048 # v0.1.15 with: files: | release-artifacts/install.yaml From 4cba2e2703847bb106b8d3bf5cb0eb355aabf9e1 Mon Sep 17 00:00:00 2001 From: keithfz Date: Mon, 16 Dec 2024 22:43:43 -0500 Subject: [PATCH 16/16] fix: fixing some misleading unit test case names (#4934) --- api/v1alpha1/kubernetes_helpers.go | 4 +-- .../validation/envoyproxy_validate_test.go | 30 +++++++++++-------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/api/v1alpha1/kubernetes_helpers.go b/api/v1alpha1/kubernetes_helpers.go index 761f880d29b..6dd6b5fbfcc 100644 --- a/api/v1alpha1/kubernetes_helpers.go +++ b/api/v1alpha1/kubernetes_helpers.go @@ -239,10 +239,10 @@ func (service *KubernetesServiceSpec) ApplyMergePatch(old *corev1.Service) (*cor var patchedJSON []byte var err error - // Serialize the current deployment to JSON + // Serialize the current service to JSON originalJSON, err := json.Marshal(old) if err != nil { - return nil, fmt.Errorf("error marshaling original deployment: %w", err) + return nil, fmt.Errorf("error marshaling original 
service: %w", err) } switch { diff --git a/api/v1alpha1/validation/envoyproxy_validate_test.go b/api/v1alpha1/validation/envoyproxy_validate_test.go index 8a784db59ab..9c96792f541 100644 --- a/api/v1alpha1/validation/envoyproxy_validate_test.go +++ b/api/v1alpha1/validation/envoyproxy_validate_test.go @@ -403,7 +403,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: true, }, { - name: "should be invalid when service patch type is empty", + name: "should be valid when service patch is empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -427,7 +427,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: true, }, { - name: "should be invalid when deployment patch type is empty", + name: "should be valid when deployment patch is empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -500,7 +500,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: true, }, { - name: "should be invalid when pdb patch not set", + name: "should be invalid when pdb patch object is empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -522,7 +522,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: false, }, { - name: "should be invalid when pdb type not set", + name: "should be valid when pdb type not set", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -534,14 +534,16 @@ func TestValidateEnvoyProxy(t *testing.T) { Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ EnvoyPDB: &egv1a1.KubernetesPodDisruptionBudgetSpec{ Patch: &egv1a1.KubernetesPatchSpec{ - Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{}"), + }, }, }, }, }, }, }, - expected: false, + expected: true, }, { name: "should be valid when hpa patch and type are empty", @@ -593,7 +595,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: true, }, { - name: "should be invalid when hpa patch not set", + name: "should be invalid when hpa patch object is empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -615,7 +617,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: false, }, { - name: "should be invalid when hpa type not set", + name: "should be valid when hpa type not set", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -627,17 +629,19 @@ func TestValidateEnvoyProxy(t *testing.T) { Kubernetes: &egv1a1.EnvoyProxyKubernetesProvider{ EnvoyHpa: &egv1a1.KubernetesHorizontalPodAutoscalerSpec{ Patch: &egv1a1.KubernetesPatchSpec{ - Type: ptr.To(egv1a1.StrategicMerge), + Value: apiextensionsv1.JSON{ + Raw: []byte("{}"), + }, }, }, }, }, }, }, - expected: false, + expected: true, }, { - name: "should invalid when patch object is empty", + name: "should invalid when deployment patch object is empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -659,7 +663,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: false, }, { - name: "should valid when patch type and object are both not empty", + name: "should valid when deployment patch type and object are both not empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test", @@ -684,7 +688,7 @@ func TestValidateEnvoyProxy(t *testing.T) { expected: true, }, { - name: "should valid when patch type is empty and object is not empty", + name: "should valid when deployment patch type is empty and object is not empty", proxy: &egv1a1.EnvoyProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test",