From 19d774349a98e32075051c6cded82195063202ab Mon Sep 17 00:00:00 2001 From: Vicente Cheng Date: Fri, 8 Nov 2024 16:13:08 +0800 Subject: [PATCH] controller: Do not check volume status if we do not have permission - Also, remove the redundant lhclient on node server Signed-off-by: Vicente Cheng --- pkg/csi/controller_server.go | 33 ++++++++++++++++++++++----------- pkg/csi/manager.go | 2 +- pkg/csi/node_server.go | 5 +---- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/pkg/csi/controller_server.go b/pkg/csi/controller_server.go index bea1fd4e..cb094505 100644 --- a/pkg/csi/controller_server.go +++ b/pkg/csi/controller_server.go @@ -37,8 +37,9 @@ const ( ) type ControllerServer struct { - namespace string - hostStorageClass string + namespace string + hostStorageClass string + checkLHVolumeStatus bool coreClient ctlv1.Interface storageClient ctlstoragev1.Interface @@ -54,19 +55,26 @@ func NewControllerServer(coreClient ctlv1.Interface, storageClient ctlstoragev1. accessMode := []csi.VolumeCapability_AccessMode_Mode{ csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, } + checkLHVolumeStatus := true + // to handle well with previous Harvester cluster if _, err := harvNetFSClient.HarvesterhciV1beta1().NetworkFilesystems(HarvesterNS).List(context.TODO(), metav1.ListOptions{}); err == nil { accessMode = append(accessMode, csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER) } else { logrus.Warnf("Failed to list NetworkFilesystems, skip RWX volume support with error: %v", err) } + if _, err := lhClient.LonghornV1beta2().Volumes(LonghornNS).List(context.TODO(), metav1.ListOptions{}); err != nil { + logrus.Warnf("Failed to list Longhorn volumes, skip checking Longhorn volume status with error: %v", err) + checkLHVolumeStatus = false + } return &ControllerServer{ - namespace: namespace, - hostStorageClass: hostStorageClass, - coreClient: coreClient, - storageClient: storageClient, - virtClient: virtClient, - lhClient: lhClient, - harvNetFSClient: 
harvNetFSClient, + namespace: namespace, + hostStorageClass: hostStorageClass, + checkLHVolumeStatus: checkLHVolumeStatus, + coreClient: coreClient, + storageClient: storageClient, + virtClient: virtClient, + lhClient: lhClient, + harvNetFSClient: harvNetFSClient, caps: getControllerServiceCapabilities( []csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, @@ -298,7 +306,7 @@ func (cs *ControllerServer) ControllerPublishVolume(_ context.Context, req *csi. // we should wait for the volume to be detached from the previous node // Wait until engine confirmed that rebuild started if err := wait.PollUntilContextTimeout(context.Background(), tickAttachDetach, timeoutAttachDetach, true, func(context.Context) (bool, error) { - return waitForVolSettled(cs.lhClient, lhVolumeName, req.GetNodeId()) + return waitForVolSettled(cs.lhClient, lhVolumeName, req.GetNodeId(), cs.checkLHVolumeStatus) }); err != nil { return nil, status.Errorf(codes.DeadlineExceeded, "Failed to wait the volume %s status to settled", req.GetVolumeId()) } @@ -669,7 +677,10 @@ func getVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) [ return vca } -func waitForVolSettled(lhClient *lhclientset.Clientset, lhVolName, nodeID string) (bool, error) { +func waitForVolSettled(lhClient *lhclientset.Clientset, lhVolName, nodeID string, checkLHVolumeStatus bool) (bool, error) { + if !checkLHVolumeStatus { + return true, nil + } volume, err := lhClient.LonghornV1beta2().Volumes(LonghornNS).Get(context.TODO(), lhVolName, metav1.GetOptions{}) if err != nil { logrus.Warnf("waitForVolumeSettled: error while waiting for volume %s to be settled. 
Err: %v", lhVolName, err) diff --git a/pkg/csi/manager.go b/pkg/csi/manager.go index 816cb7a2..c9b4f869 100644 --- a/pkg/csi/manager.go +++ b/pkg/csi/manager.go @@ -117,7 +117,7 @@ func (m *Manager) Run(cfg *config.Config) error { } m.ids = NewIdentityServer(driverName, version.FriendlyVersion()) - m.ns = NewNodeServer(coreClient.Core().V1(), virtClient, lhclient, harvNetworkFSClient, nodeID, namespace, restConfig.Host) + m.ns = NewNodeServer(coreClient.Core().V1(), virtClient, harvNetworkFSClient, nodeID, namespace, restConfig.Host) m.cs = NewControllerServer(coreClient.Core().V1(), storageClient.Storage().V1(), virtClient, lhclient, harvNetworkFSClient, namespace, cfg.HostStorageClass) // Create GRPC servers diff --git a/pkg/csi/node_server.go b/pkg/csi/node_server.go index 9e20fa6b..3e9611d4 100644 --- a/pkg/csi/node_server.go +++ b/pkg/csi/node_server.go @@ -12,7 +12,6 @@ import ( common "github.com/harvester/go-common/common" networkfsv1 "github.com/harvester/networkfs-manager/pkg/apis/harvesterhci.io/v1beta1" harvnetworkfsset "github.com/harvester/networkfs-manager/pkg/generated/clientset/versioned" - lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned" "github.com/pkg/errors" ctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" "github.com/sirupsen/logrus" @@ -36,11 +35,10 @@ type NodeServer struct { nodeID string caps []*csi.NodeServiceCapability vip string - lhClient *lhclientset.Clientset harvNetFSClient *harvnetworkfsset.Clientset } -func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient, lhClient *lhclientset.Clientset, harvNetFSClient *harvnetworkfsset.Clientset, nodeID string, namespace, vip string) *NodeServer { +func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient, harvNetFSClient *harvnetworkfsset.Clientset, nodeID string, namespace, vip string) *NodeServer { return &NodeServer{ coreClient: coreClient, virtClient: virtClient, @@ -52,7 +50,6 @@ 
func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, }), vip: vip, - lhClient: lhClient, harvNetFSClient: harvNetFSClient, } }