controller: Do not check volume status if we do not have permission
    - Also, remove the redundant lhclient on node server

Signed-off-by: Vicente Cheng <[email protected]>
Vicente-Cheng authored and WebberHuang1118 committed Nov 12, 2024
1 parent 7cd9bf9 commit 19d7743
Showing 3 changed files with 24 additions and 16 deletions.
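The gist of the change: NewControllerServer now probes Longhorn access once with a List call and records the result in checkLHVolumeStatus, and waitForVolSettled short-circuits when that flag is false, so a controller without RBAC permission on Longhorn volumes no longer blocks in ControllerPublishVolume. A minimal, self-contained sketch of that pattern follows; the package name, helper names, the longhorn-system namespace value, and the "settled" condition are illustrative assumptions, not the actual Harvester code.

// Minimal sketch of the permission-probe pattern this commit introduces
// (illustrative only, not the actual Harvester CSI wiring): try one List
// call at startup; if it fails (for example, RBAC forbids it), remember
// that and treat later volume-status checks as already settled instead of
// blocking attach/detach until the poll times out.
package csisketch

import (
	"context"
	"log"

	lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// longhornNS mirrors the LonghornNS constant used by the real controller;
// the value is assumed here.
const longhornNS = "longhorn-system"

type controller struct {
	lhClient            *lhclientset.Clientset
	checkLHVolumeStatus bool
}

func newController(lhClient *lhclientset.Clientset) *controller {
	check := true
	// Probe once: a failed List usually means the service account lacks
	// permission on Longhorn volumes, so later status checks are skipped.
	if _, err := lhClient.LonghornV1beta2().Volumes(longhornNS).List(context.TODO(), metav1.ListOptions{}); err != nil {
		log.Printf("cannot list Longhorn volumes, skipping volume status checks: %v", err)
		check = false
	}
	return &controller{lhClient: lhClient, checkLHVolumeStatus: check}
}

// volSettled shows the shape of the gated check; the real settled logic
// lives in waitForVolSettled and is not reproduced here.
func (c *controller) volSettled(name, nodeID string) (bool, error) {
	if !c.checkLHVolumeStatus {
		// Without read permission, report "settled" immediately.
		return true, nil
	}
	vol, err := c.lhClient.LonghornV1beta2().Volumes(longhornNS).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false, nil // transient error: keep polling
	}
	// Illustrative condition: the volume is attached to the requested node.
	return vol.Status.CurrentNodeID == nodeID, nil
}

The change also drops the now-unused lhClient from NewNodeServer, since only the controller path reads Longhorn volumes.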
33 changes: 22 additions & 11 deletions pkg/csi/controller_server.go
@@ -37,8 +37,9 @@ const (
 )
 
 type ControllerServer struct {
-	namespace        string
-	hostStorageClass string
+	namespace           string
+	hostStorageClass    string
+	checkLHVolumeStatus bool
 
 	coreClient    ctlv1.Interface
 	storageClient ctlstoragev1.Interface
@@ -54,19 +55,26 @@ func NewControllerServer(coreClient ctlv1.Interface, storageClient ctlstoragev1.
 	accessMode := []csi.VolumeCapability_AccessMode_Mode{
 		csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
 	}
+	checkLHVolumeStatus := true
 	// to handle well with previous Harvester cluster
 	if _, err := harvNetFSClient.HarvesterhciV1beta1().NetworkFilesystems(HarvesterNS).List(context.TODO(), metav1.ListOptions{}); err == nil {
 		accessMode = append(accessMode, csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER)
 	} else {
 		logrus.Warnf("Failed to list NetworkFilesystems, skip RWX volume support with error: %v", err)
 	}
+	if _, err := lhClient.LonghornV1beta2().Volumes(LonghornNS).List(context.TODO(), metav1.ListOptions{}); err != nil {
+		logrus.Warnf("Failed to list Longhorn volumes, skip checking Longhorn volume status with error: %v", err)
+		checkLHVolumeStatus = false
+	}
 	return &ControllerServer{
-		namespace:        namespace,
-		hostStorageClass: hostStorageClass,
-		coreClient:       coreClient,
-		storageClient:    storageClient,
-		virtClient:       virtClient,
-		lhClient:         lhClient,
-		harvNetFSClient:  harvNetFSClient,
+		namespace:           namespace,
+		hostStorageClass:    hostStorageClass,
+		checkLHVolumeStatus: checkLHVolumeStatus,
+		coreClient:          coreClient,
+		storageClient:       storageClient,
+		virtClient:          virtClient,
+		lhClient:            lhClient,
+		harvNetFSClient:     harvNetFSClient,
 		caps: getControllerServiceCapabilities(
 			[]csi.ControllerServiceCapability_RPC_Type{
 				csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
@@ -298,7 +306,7 @@ func (cs *ControllerServer) ControllerPublishVolume(_ context.Context, req *csi.
 	// we should wait for the volume to be detached from the previous node
 	// Wait until engine confirmed that rebuild started
 	if err := wait.PollUntilContextTimeout(context.Background(), tickAttachDetach, timeoutAttachDetach, true, func(context.Context) (bool, error) {
-		return waitForVolSettled(cs.lhClient, lhVolumeName, req.GetNodeId())
+		return waitForVolSettled(cs.lhClient, lhVolumeName, req.GetNodeId(), cs.checkLHVolumeStatus)
 	}); err != nil {
 		return nil, status.Errorf(codes.DeadlineExceeded, "Failed to wait the volume %s status to settled", req.GetVolumeId())
 	}
@@ -669,7 +677,10 @@ func getVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) [
 	return vca
 }
 
-func waitForVolSettled(lhClient *lhclientset.Clientset, lhVolName, nodeID string) (bool, error) {
+func waitForVolSettled(lhClient *lhclientset.Clientset, lhVolName, nodeID string, checkLHVolumeStatus bool) (bool, error) {
+	if !checkLHVolumeStatus {
+		return true, nil
+	}
 	volume, err := lhClient.LonghornV1beta2().Volumes(LonghornNS).Get(context.TODO(), lhVolName, metav1.GetOptions{})
 	if err != nil {
 		logrus.Warnf("waitForVolumeSettled: error while waiting for volume %s to be settled. Err: %v", lhVolName, err)
2 changes: 1 addition & 1 deletion pkg/csi/manager.go
@@ -117,7 +117,7 @@ func (m *Manager) Run(cfg *config.Config) error {
 	}
 
 	m.ids = NewIdentityServer(driverName, version.FriendlyVersion())
-	m.ns = NewNodeServer(coreClient.Core().V1(), virtClient, lhclient, harvNetworkFSClient, nodeID, namespace, restConfig.Host)
+	m.ns = NewNodeServer(coreClient.Core().V1(), virtClient, harvNetworkFSClient, nodeID, namespace, restConfig.Host)
 	m.cs = NewControllerServer(coreClient.Core().V1(), storageClient.Storage().V1(), virtClient, lhclient, harvNetworkFSClient, namespace, cfg.HostStorageClass)
 
 	// Create GRPC servers
5 changes: 1 addition & 4 deletions pkg/csi/node_server.go
@@ -12,7 +12,6 @@ import (
 	common "github.com/harvester/go-common/common"
 	networkfsv1 "github.com/harvester/networkfs-manager/pkg/apis/harvesterhci.io/v1beta1"
 	harvnetworkfsset "github.com/harvester/networkfs-manager/pkg/generated/clientset/versioned"
-	lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
 	"github.com/pkg/errors"
 	ctlv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
 	"github.com/sirupsen/logrus"
@@ -36,11 +35,10 @@ type NodeServer struct {
 	nodeID          string
 	caps            []*csi.NodeServiceCapability
 	vip             string
-	lhClient        *lhclientset.Clientset
 	harvNetFSClient *harvnetworkfsset.Clientset
 }
 
-func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient, lhClient *lhclientset.Clientset, harvNetFSClient *harvnetworkfsset.Clientset, nodeID string, namespace, vip string) *NodeServer {
+func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient, harvNetFSClient *harvnetworkfsset.Clientset, nodeID string, namespace, vip string) *NodeServer {
 	return &NodeServer{
 		coreClient: coreClient,
 		virtClient: virtClient,
@@ -52,7 +50,6 @@ func NewNodeServer(coreClient ctlv1.Interface, virtClient kubecli.KubevirtClient
 			csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
 		}),
 		vip: vip,
-		lhClient:        lhClient,
 		harvNetFSClient: harvNetFSClient,
 	}
 }
