azurefile: kata-cc: add confidential node conditional #2346

Draft, wants to merge 1 commit into base: master
21 changes: 21 additions & 0 deletions deploy/rbac-csi-azurefile-node.yaml
@@ -54,3 +54,24 @@ roleRef:
  name: csi-azurefile-node-katacc-role
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-azurefile-node-role
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-azurefile-node-binding
subjects:
  - kind: ServiceAccount
    name: csi-azurefile-node-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-azurefile-node-role
  apiGroup: rbac.authorization.k8s.io
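The two objects added above give the node plugin's service account (csi-azurefile-node-sa) read access to Node objects; the label lookup introduced in pkg/azurefile/azurefile.go below relies on that permission. As an illustration only, not part of this change, a startup self-check along the following lines could confirm the grant is in place; the helper name canGetNodes and where it would be wired in are assumptions, while the nodes/get permission itself comes from the manifest above.

// Sketch only (not part of this PR): confirm the running service account may
// "get" Node objects, so a missing RBAC update surfaces as an explicit warning
// rather than a later label-lookup failure. canGetNodes is a hypothetical helper.
import (
    "context"
    "fmt"

    authorizationv1 "k8s.io/api/authorization/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

func canGetNodes(ctx context.Context, kubeClient clientset.Interface) (bool, error) {
    review := &authorizationv1.SelfSubjectAccessReview{
        Spec: authorizationv1.SelfSubjectAccessReviewSpec{
            ResourceAttributes: &authorizationv1.ResourceAttributes{
                Verb:     "get",
                Resource: "nodes",
            },
        },
    }
    resp, err := kubeClient.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
    if err != nil {
        return false, fmt.Errorf("self subject access review failed: %v", err)
    }
    return resp.Status.Allowed, nil
}

Running such a check once at driver startup would keep per-mount calls cheap; this is only one possible place to add it.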
31 changes: 31 additions & 0 deletions pkg/azurefile/azurefile.go
@@ -48,6 +48,7 @@
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"
    mount "k8s.io/mount-utils"

@@ -453,7 +454,12 @@
    csi.RegisterControllerServer(server, d)
    csi.RegisterNodeServer(server, d)
    d.server = server
    val, val2, err := getNodeInfoFromLabels(ctx, d.NodeID, d.kubeClient)
    if err != nil {
        klog.Warningf("failed to get node info from labels: %v", err)
    }

    klog.V(2).Infof("Node info from labels: %s, %s", val, val2)
    listener, err := csicommon.ListenEndpoint(d.endpoint)
    if err != nil {
        klog.Fatalf("failed to listen endpoint: %v", err)
@@ -1274,3 +1280,28 @@
    }
    return d.cloud.Environment.StorageEndpointSuffix
}

func getNodeInfoFromLabels(ctx context.Context, nodeId string, kubeClient clientset.Interface) (string, string, error) {
[Check failure on line 1284 in pkg/azurefile/azurefile.go, GitHub Actions / Go Lint: var-naming: func parameter nodeId should be nodeID (revive)]
    if kubeClient == nil || kubeClient.CoreV1() == nil {
        return "", "", fmt.Errorf("kubeClient is nil")
    }

    node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeId, metav1.GetOptions{})
    if err != nil {
        return "", "", fmt.Errorf("get node(%s) failed with %v", nodeId, err)
    }

    if len(node.Labels) == 0 {
        return "", "", fmt.Errorf("node(%s) label is empty", nodeId)
    }
    return node.Labels["kubernetes.azure.com/kata-mshv-vm-isolation"], node.Labels["katacontainers.io/kata-runtime"], nil
}

func isNodeConfidential(ctx context.Context, nodeId string, kubeClient clientset.Interface) bool {
[Check failure on line 1300 in pkg/azurefile/azurefile.go, GitHub Actions / Go Lint: var-naming: func parameter nodeId should be nodeID (revive)]
    val, val2, err := getNodeInfoFromLabels(ctx, nodeId, kubeClient)
    if err != nil {
        klog.Warningf("get node(%s) confidential label failed with %v", nodeId, err)
        return false
    }
    return val == "true" || val2 == "true"
}
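To make the label contract above concrete, here is a minimal test sketch, not part of this change, using client-go's fake clientset; the test name, node name, and file placement are invented, and it assumes isNodeConfidential keeps the signature shown in this diff. A node counts as confidential when either the AKS kubernetes.azure.com/kata-mshv-vm-isolation label or the upstream katacontainers.io/kata-runtime label is "true", and any lookup failure falls back to false.

import (
    "context"
    "testing"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

// TestIsNodeConfidentialSketch exercises both the positive label match and
// the lookup-failure fallback of isNodeConfidential.
func TestIsNodeConfidentialSketch(t *testing.T) {
    node := &corev1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name:   "aks-nodepool1-000000",
            Labels: map[string]string{"kubernetes.azure.com/kata-mshv-vm-isolation": "true"},
        },
    }
    client := fake.NewSimpleClientset(node)

    if !isNodeConfidential(context.Background(), "aks-nodepool1-000000", client) {
        t.Errorf("expected node with kata-mshv-vm-isolation=true to be confidential")
    }
    if isNodeConfidential(context.Background(), "missing-node", client) {
        t.Errorf("expected lookup failure to fall back to non-confidential")
    }
}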
13 changes: 4 additions & 9 deletions pkg/azurefile/nodeserver.go
@@ -101,8 +101,8 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu
    }

    if d.enableKataCCMount {
-        enableKataCCMount := getValueInMap(context, enableKataCCMountField)
-        if strings.EqualFold(enableKataCCMount, trueValue) && context[podNameField] != "" && context[podNamespaceField] != "" {
+        enableKataCCMount := isNodeConfidential(ctx, d.NodeID, d.kubeClient)
+        if enableKataCCMount && context[podNameField] != "" && context[podNamespaceField] != "" {
            runtimeClass, err := getRuntimeClassForPodFunc(ctx, d.kubeClient, context[podNameField], context[podNamespaceField])
            if err != nil {
                return nil, status.Errorf(codes.Internal, "failed to get runtime class for pod %s/%s: %v", context[podNamespaceField], context[podNameField], err)
@@ -252,7 +252,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
    // don't respect fsType from req.GetVolumeCapability().GetMount().GetFsType()
    // since it's ext4 by default on Linux
    var fsType, server, protocol, ephemeralVolMountOptions, storageEndpointSuffix, folderName string
-    var ephemeralVol, enableKataCCMount bool
+    var ephemeralVol bool
    fileShareNameReplaceMap := map[string]string{}

    mountPermissions := d.mountPermissions
@@ -284,11 +284,6 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
            fileShareNameReplaceMap[pvcNameMetadata] = v
        case pvNameKey:
            fileShareNameReplaceMap[pvNameMetadata] = v
-        case enableKataCCMountField:
-            enableKataCCMount, err = strconv.ParseBool(v)
-            if err != nil {
-                return nil, status.Errorf(codes.InvalidArgument, "invalid %s: %s in storage class", enableKataCCMountField, v)
-            }
        case mountPermissionsField:
            if v != "" {
                var err error
@@ -423,7 +418,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
        }
        klog.V(2).Infof("volume(%s) mount %s on %s succeeded", volumeID, source, cifsMountPath)
    }
-
+    enableKataCCMount := isNodeConfidential(ctx, d.NodeID, d.kubeClient)
    // If runtime OS is not windows and protocol is not nfs, save mountInfo.json
    if d.enableKataCCMount && enableKataCCMount {
        if runtime.GOOS != "windows" && protocol != nfs {