Commit
can only handle one kind of LoadBalancerClass in a cluster due to cloud-provider wanting no loadBalancerClass set
AshleyDumaine committed May 16, 2024
1 parent 4013314 commit bd542af
Showing 7 changed files with 28 additions and 140 deletions.
7 changes: 3 additions & 4 deletions README.md
@@ -34,7 +34,7 @@ For general feature and usage notes, refer to the [Getting Started with Linode N

#### Using IP Sharing instead of NodeBalancers
Alternatively, the Linode CCM can integrate with [Cilium's BGP Control Plane](https://docs.cilium.io/en/stable/network/bgp-control-plane/)
to perform load-balancing via IP sharing on labelled Nodes. This option does not create a backing NodeBalancer and instead
to perform load-balancing via IP sharing on labeled Nodes. This option does not create a backing NodeBalancer and instead
provisions a new IP on an ip-holder Nanode to share. See [Shared IP LoadBalancing](#shared-ip-load-balancing).
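With a single cluster-wide load-balancer type, a Service only needs `type: LoadBalancer` to be picked up — no per-Service annotation is involved. A minimal sketch (the name, selector, and ports below are placeholders):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example-app        # placeholder name
spec:
  type: LoadBalancer       # shared-IP handling applies when the CCM runs with --load-balancer-type=cilium-bgp
  selector:
    app: example-app       # placeholder selector
  ports:
    - name: http
      port: 80
      targetPort: 8080
```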

#### Annotations
@@ -64,7 +64,6 @@ Annotation (Suffix) | Values | Default | Description
`tags` | string | | A comma-separated list of tags to be applied to the created NodeBalancer instance
`firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewalls](#firewalls).
`firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. Adding this annotation creates a new CCM managed Linode CloudFirewall instance. See [Firewalls](#firewalls).
`type` | `nodebalancer`, `cilium-bgp` | | The type of load-balancer to use. If unspecified, the type specified in the CCM's `--default-load-balancer` is used for the Service.

#### Deprecated Annotations
These annotations are deprecated, and will be removed in a future release.
@@ -88,7 +87,7 @@ Key | Values | Default | Description
#### Shared IP Load-Balancing
**NOTE:** This feature requires contacting [Customer Support](https://www.linode.com/support/contact/) to enable provisioning additional IPs.

Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and either `--default-load-balancer` is set to `cilium-bgp` or the Service has the `service.beta.kubernetes.io/linode-loadbalancer-type` annotation set to `cilium-bgp`. Additionally, the `LINODE_URL` environment variable in the Linode CCM needs to be set to "https://api.linode.com/v4beta".
Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`. Additionally, the `LINODE_URL` environment variable in the Linode CCM needs to be set to "https://api.linode.com/v4beta" for IP sharing to work.

This feature requires the Kubernetes cluster to be using [Cilium](https://cilium.io/) as the CNI with the `bgp-control-plane` feature enabled.
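On the CNI side, the BGP control plane is typically switched on through Cilium's Helm values; the sketch below is an assumption to verify against the Cilium docs for your version:

```yaml
# Assumed Cilium Helm values; key names can vary between Cilium releases
bgpControlPlane:
  enabled: true
```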

@@ -112,7 +111,7 @@ spec:
- --port=0
- --secure-port=10253
- --bgp-node-selector=cilium-bgp-peering=true
- --default-load-balancer=cilium-bgp
- --load-balancer-type=cilium-bgp
volumeMounts:
- mountPath: /etc/kubernetes
name: k8s
121 changes: 9 additions & 112 deletions cloud/linode/cilium_loadbalancers_test.go
@@ -11,7 +11,6 @@ import (
k8sClient "github.com/cilium/cilium/pkg/k8s/client"
fakev2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake"
"github.com/golang/mock/gomock"
"github.com/linode/linode-cloud-controller-manager/cloud/annotations"
"github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks"
"github.com/linode/linodego"
v1 "k8s.io/api/core/v1"
@@ -76,14 +75,6 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode",
f: testCreateWithExistingIPHolder,
},
{
name: "Create Cilium Load Balancer With default loadBalancerClass set",
f: testCreateWithDefaultLBCilium,
},
{
name: "Create Cilium Load Balancer With default loadBalancerClass set for nodebalancer service",
f: testCreateWithDefaultLBCiliumNodebalancher,
},
{
name: "Create Cilium Load Balancer With no existing IP holder nanode",
f: testCreateWithNoExistingIPHolder,
@@ -92,10 +83,6 @@
name: "Delete Cilium Load Balancer",
f: testEnsureCiliumLoadBalancerDeleted,
},
{
name: "Delete NodeBalancer With default loadBalancerClass set to Cilium",
f: testDeleteNBWithDefaultLBCilium,
},
}
for _, tc := range testCases {
ctrl := gomock.NewController(t)
@@ -107,7 +94,7 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
}
}

func createTestService(lbType *string) *v1.Service {
func createTestService() *v1.Service {
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: randString(),
@@ -131,9 +118,6 @@ func createTestService(lbType *string) *v1.Service {
},
},
}
if lbType != nil {
svc.Annotations = map[string]string{annotations.AnnLinodeLoadBalancerType: *lbType}
}

return svc
}
@@ -155,12 +139,12 @@ func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) {
}

func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
svc := createTestService(Pointer(ciliumLBType))
svc := createTestService()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, nodeBalancerLBType}
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}

lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
if !errors.Is(err, errNoBGPSelector) {
@@ -173,12 +157,12 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {

func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(Pointer(ciliumLBType))
svc := createTestService()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, nodeBalancerLBType}
lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType}

lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
if err == nil {
@@ -191,40 +175,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {

func testCreateWithExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(Pointer(ciliumLBType))

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
addNodes(t, kubeClient, nodes)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, nodeBalancerLBType}

filter := map[string]string{"label": ipHolderLabel}
rawFilter, _ := json.Marshal(filter)
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil)
dummySharedIP := "45.76.101.26"
mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
IPs: []string{dummySharedIP},
LinodeID: 11111,
}).Times(1)
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
IPs: []string{dummySharedIP},
LinodeID: 22222,
}).Times(1)

lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
if err != nil {
t.Fatalf("expected a nil error, got %v", err)
}
if lbStatus == nil {
t.Fatal("expected non-nil lbStatus")
}
}

func testCreateWithDefaultLBCilium(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(nil)
svc := createTestService()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -255,36 +206,9 @@ func testCreateWithDefaultLBCilium(t *testing.T, mc *mocks.MockClient) {
}
}

func testCreateWithDefaultLBCiliumNodebalancher(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(Pointer(nodeBalancerLBType))

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
addNodes(t, kubeClient, nodes)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}

mc.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()).Times(1).Return(&linodego.NodeBalancer{
ID: 12345,
Label: Pointer("foobar"),
Region: "us-west",
Hostname: Pointer("foobar-nb"),
IPv4: Pointer("1.2.3.4"),
}, nil)

lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
if err != nil {
t.Fatalf("expected a nil error, got %v", err)
}
if lbStatus == nil {
t.Fatal("expected non-nil lbStatus")
}
}

func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(nil)
svc := createTestService()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
@@ -318,13 +242,13 @@ func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {

func testEnsureCiliumLoadBalancerDeleted(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(Pointer(ciliumLBType))
svc := createTestService()

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
addNodes(t, kubeClient, nodes)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, nodeBalancerLBType}
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}

dummySharedIP := "45.76.101.26"
svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}}
@@ -341,30 +265,3 @@
t.Fatalf("expected a nil error, got %v", err)
}
}

func testDeleteNBWithDefaultLBCilium(t *testing.T, mc *mocks.MockClient) {
Options.BGPNodeSelector = "cilium-bgp-peering=true"
svc := createTestService(Pointer(nodeBalancerLBType))

kubeClient, _ := k8sClient.NewFakeClientset()
ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
addService(t, kubeClient, svc)
lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}

dummySharedIP := "45.76.101.26"
svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}}

mc.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.NodeBalancer{{
ID: 12345,
Label: Pointer("foobar"),
Region: "us-west",
Hostname: Pointer("foobar-nb"),
IPv4: Pointer("1.2.3.4"),
}}, nil)
mc.EXPECT().DeleteNodeBalancer(gomock.Any(), gomock.Any()).Times(1).Return(nil)

err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc)
if err != nil {
t.Fatalf("expected a nil error, got %v", err)
}
}
6 changes: 3 additions & 3 deletions cloud/linode/cloud.go
@@ -35,7 +35,7 @@ var Options struct {
LinodeGoDebug bool
EnableRouteController bool
VPCName string
DefaultLoadBalancer string
LoadBalancerType string
BGPNodeSelector string
}

@@ -119,10 +119,10 @@ func newCloud() (cloudprovider.Interface, error) {
return nil, fmt.Errorf("routes client was not created successfully: %w", err)
}

if Options.DefaultLoadBalancer != "" && !slices.Contains(supportedLoadBalancerTypes, Options.DefaultLoadBalancer) {
if Options.LoadBalancerType != "" && !slices.Contains(supportedLoadBalancerTypes, Options.LoadBalancerType) {
return nil, fmt.Errorf(
"unsupported default load-balancer type %s. Options are %v",
Options.DefaultLoadBalancer,
Options.LoadBalancerType,
supportedLoadBalancerTypes,
)
}
28 changes: 10 additions & 18 deletions cloud/linode/loadbalancers.go
@@ -44,11 +44,11 @@ func (e lbNotFoundError) Error() string {
}

type loadbalancers struct {
client client.Client
zone string
kubeClient kubernetes.Interface
ciliumClient ciliumclient.CiliumV2alpha1Interface
defaultLB string
client client.Client
zone string
kubeClient kubernetes.Interface
ciliumClient ciliumclient.CiliumV2alpha1Interface
loadBalancerType string
}

type portConfigAnnotation struct {
@@ -66,7 +66,7 @@ type portConfig struct {

// newLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer.
func newLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer {
return &loadbalancers{client: client, zone: zone, defaultLB: Options.DefaultLoadBalancer}
return &loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType}
}

func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) {
@@ -156,14 +156,6 @@ func (l *loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.S
return fmt.Sprintf("ccm-%s", unixNano[len(unixNano)-12:])
}

func (l *loadbalancers) isCiliumLBType(service *v1.Service) bool {
if (l.defaultLB == ciliumLBType && service.GetAnnotations()[annotations.AnnLinodeLoadBalancerType] != nodeBalancerLBType) ||
service.GetAnnotations()[annotations.AnnLinodeLoadBalancerType] == ciliumLBType {
return true
}
return false
}

// GetLoadBalancer returns the *v1.LoadBalancerStatus of service.
//
// GetLoadBalancer will not modify service.
@@ -173,7 +165,7 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string,
sentry.SetTag(ctx, "service", service.Name)

// Handle LoadBalancers backed by Cilium
if l.isCiliumLBType(service) {
if l.loadBalancerType == ciliumLBType {
return &v1.LoadBalancerStatus{
Ingress: service.Status.LoadBalancer.Ingress,
}, true, nil
@@ -206,7 +198,7 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
serviceNn := getServiceNn(service)

// Handle LoadBalancers backed by Cilium
if l.isCiliumLBType(service) {
if l.loadBalancerType == ciliumLBType {
klog.Infof("handling LoadBalancer Service %s as %s", serviceNn, ciliumLBClass)

if err = l.ensureCiliumBGPPeeringPolicy(ctx); err != nil {
@@ -433,7 +425,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
sentry.SetTag(ctx, "service", service.Name)

// handle LoadBalancers backed by Cilium
if l.isCiliumLBType(service) {
if l.loadBalancerType == ciliumLBType {
klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass)
// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
for _, node := range nodes {
@@ -504,7 +496,7 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa
sentry.SetTag(ctx, "service", service.Name)

// Handle LoadBalancers backed by Cilium
if l.isCiliumLBType(service) {
if l.loadBalancerType == ciliumLBType {
klog.Infof("handling LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass)
if err := l.deleteSharedIP(ctx, service); err != nil {
return err
2 changes: 1 addition & 1 deletion deploy/chart/templates/daemonset.yaml
@@ -47,7 +47,7 @@ spec:
{{- end }}
{{- if .Values.sharedIPLoadBalancing }}
- --bgp-node-selector={{ required "A valid .Values.sharedIPLoadBalancing.bgpNodeSelector is required for shared IP load-balancing" .Values.sharedIPLoadBalancing.bgpNodeSelector }}
- --default-load-balancer={{ default "nodebalancer" .Values.sharedIPLoadBalancing.defaultLoadBalancer }}
- --load-balancer-type={{ required "A valid .Values.sharedIPLoadBalancing.loadBalancerType is required for shared IP load-balancing" .Values.sharedIPLoadBalancing.loadBalancerType }}
{{- end }}
volumeMounts:
- mountPath: /etc/kubernetes
2 changes: 1 addition & 1 deletion deploy/chart/values.yaml
@@ -46,7 +46,7 @@ tolerations:

# Options for LoadBalancers backed by shared IPs instead of NodeBalancers
# sharedIPLoadBalancing:
# defaultLoadBalancer: <nodebalancer or cilium-bgp>
# loadBalancerType: cilium-bgp
# bgpNodeSelector: <node label (e.g. cilium-bgp-peering=true)>

# This section adds ability to enable route-controller for ccm
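Put together, an uncommented configuration satisfying the chart's `required` checks might look like the sketch below (the node label is an example value):

```yaml
# Sketch: chart values enabling shared IP load-balancing
sharedIPLoadBalancing:
  loadBalancerType: cilium-bgp              # nodebalancer or cilium-bgp
  bgpNodeSelector: cilium-bgp-peering=true  # example node label
```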
2 changes: 1 addition & 1 deletion main.go
@@ -80,7 +80,7 @@ func main() {
command.Flags().BoolVar(&linode.Options.LinodeGoDebug, "linodego-debug", false, "enables debug output for the LinodeAPI wrapper")
command.Flags().BoolVar(&linode.Options.EnableRouteController, "enable-route-controller", false, "enables route_controller for ccm")
command.Flags().StringVar(&linode.Options.VPCName, "vpc-name", "", "vpc name whose routes will be managed by route-controller")
command.Flags().StringVar(&linode.Options.DefaultLoadBalancer, "default-load-balancer", "nodebalancer", "configures which type of load-balancing to use for LoadBalancer Services by default (options: nodebalancer, cilium-bgp)")
command.Flags().StringVar(&linode.Options.LoadBalancerType, "load-balancer-type", "nodebalancer", "configures which type of load-balancing to use for LoadBalancer Services (options: nodebalancer, cilium-bgp)")
command.Flags().StringVar(&linode.Options.BGPNodeSelector, "bgp-node-selector", "", "node selector to use to perform shared IP fail-over with BGP (e.g. cilium-bgp-peering=true")

// Set static flags
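For deployments that don't use the Helm chart, the renamed flag would be passed directly as container args, e.g. (a sketch with example values):

```yaml
# Sketch: CCM container args using the renamed flag
args:
  - --load-balancer-type=cilium-bgp
  - --bgp-node-selector=cilium-bgp-peering=true
```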
