diff --git a/cmd/download/download.go b/cmd/download/download.go
index 44c623ed27..76c679bcd1 100644
--- a/cmd/download/download.go
+++ b/cmd/download/download.go
@@ -24,8 +24,8 @@ var (
   # Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
   kubescape download framework nsa
 
-  # Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
-  kubescape download control "Allowed hostPath"
+  # Download the "HostPath mount" control. Run 'kubescape list controls' for all controls names
+  kubescape download control "HostPath mount"
 
   # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
   kubescape download control C-0001
diff --git a/cmd/scan/control.go b/cmd/scan/control.go
index 30b0bb3a55..f414d91373 100644
--- a/cmd/scan/control.go
+++ b/cmd/scan/control.go
@@ -23,7 +23,7 @@ var (
   kubescape scan control "privileged container"
 
   # Scan list of controls separated with a comma
-  kubescape scan control "privileged container","allowed hostpath"
+  kubescape scan control "privileged container","HostPath mount"
 
   # Scan list of controls using the control ID separated with a comma
   kubescape scan control C-0058,C-0057
@@ -61,7 +61,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
 		if err := validateFrameworkScanInfo(scanInfo); err != nil {
 			return err
 		}
-		
+
 		// flagValidationControl(scanInfo)
 
 		scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}
diff --git a/core/mocks/loadmocks.go b/core/mocks/loadmocks.go
index 4c35771648..fafab1df1d 100644
--- a/core/mocks/loadmocks.go
+++ b/core/mocks/loadmocks.go
@@ -8,7 +8,7 @@ import (
 	"github.com/kubescape/opa-utils/reporthandling"
 )
 
-var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
+var mockControl_0006 = `{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
 var mockControl_0044 = `{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}`
 
@@ -31,7 +31,7 @@ func MockFramework_0013() *reporthandling.Framework {
 	return fw
 }
 
-// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "Allowed hostPath"
+// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "HostPath mount"
 func MockFramework_0006_0013() *reporthandling.Framework {
 	fw := &reporthandling.Framework{
 		PortalBase: armotypes.PortalBase{
diff --git a/core/pkg/resultshandling/results_test.go b/core/pkg/resultshandling/results_test.go
index 7eec01dac9..56cc56c267 100644
--- a/core/pkg/resultshandling/results_test.go
+++ b/core/pkg/resultshandling/results_test.go
@@ -1,7 +1,7 @@
 package resultshandling
 
 var mockFramework_0044 = `{"guid":"","name":"fw-0044","attributes":{"armoBuiltin":true},"creationTime":"","description":"Implement NSA security advices for K8s ","controls":[{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}]}`
-var mockFramework_0006_0013 = `{"guid":"","name":"fw-0006-0013","attributes":{"armoBuiltin":true},"creationTime":"","description":"Implement NSA security advices for K8s ","controls":[{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6},{"guid":"","name":"Non-root containers","attributes":{"armoBuiltin":true},"id":"C-0013","controlID":"C-0013","creationTime":"","description":"Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This contol identifies all the Pods running as root or can escalate to root.","remediation":"If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.","rules":[{"guid":"","name":"non-root-containers","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath =\"spec.\"\n result := isRootPod(pod, container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootPod(wl.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n result := isRootPod(wl.spec.jobTemplate.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n pod.spec.securityContext.runAsUser == 0\n\tpath = \"spec.securityContext.runAsUser\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n\tnot container.securityContext.runAsGroup\n\tnot container.securityContext.runAsNonRoot\n not pod.spec.securityContext.runAsUser\n\tnot pod.spec.securityContext.runAsGroup\n pod.spec.securityContext.runAsNonRoot == false\n\tpath = \"spec.securityContext.runAsNonRoot\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsGroup\n pod.spec.securityContext.runAsGroup == 0\n\tpath = sprintf(\"%vsecurityContext.runAsGroup\", [begginingOfPath])\n}\n\nisRootPod(pod, container, i, begginingOfPath)= path {\n\tpath = \"\"\n\tnot pod.spec.securityContext.runAsGroup\n\tnot pod.spec.securityContext.runAsUser\n \tcontainer.securityContext.runAsNonRoot == false\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsNonRoot\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsUser == 0\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsUser\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsGroup == 0\n\t path = sprintf(\"%vcontainers[%v].securityContext.runAsGroup\", [begginingOfPath, format_int(i, 10)])\n}","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container can run as root","remediation":"Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":6}]}`
+var mockFramework_0006_0013 = `{"guid":"","name":"fw-0006-0013","attributes":{"armoBuiltin":true},"creationTime":"","description":"Implement NSA security advices for K8s ","controls":[{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6},{"guid":"","name":"Non-root containers","attributes":{"armoBuiltin":true},"id":"C-0013","controlID":"C-0013","creationTime":"","description":"Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This contol identifies all the Pods running as root or can escalate to root.","remediation":"If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.","rules":[{"guid":"","name":"non-root-containers","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath =\"spec.\"\n result := isRootPod(pod, container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootPod(wl.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n result := isRootPod(wl.spec.jobTemplate.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n pod.spec.securityContext.runAsUser == 0\n\tpath = \"spec.securityContext.runAsUser\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n\tnot container.securityContext.runAsGroup\n\tnot container.securityContext.runAsNonRoot\n not pod.spec.securityContext.runAsUser\n\tnot pod.spec.securityContext.runAsGroup\n pod.spec.securityContext.runAsNonRoot == false\n\tpath = \"spec.securityContext.runAsNonRoot\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsGroup\n pod.spec.securityContext.runAsGroup == 0\n\tpath = sprintf(\"%vsecurityContext.runAsGroup\", [begginingOfPath])\n}\n\nisRootPod(pod, container, i, begginingOfPath)= path {\n\tpath = \"\"\n\tnot pod.spec.securityContext.runAsGroup\n\tnot pod.spec.securityContext.runAsUser\n \tcontainer.securityContext.runAsNonRoot == false\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsNonRoot\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsUser == 0\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsUser\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsGroup == 0\n\t path = sprintf(\"%vcontainers[%v].securityContext.runAsGroup\", [begginingOfPath, format_int(i, 10)])\n}","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container can run as root","remediation":"Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":6}]}`
 
 // func TestReportV2ToV1(t *testing.T) {
 // 	opaSessionObj := cautils.OPASessionObj{}
diff --git a/examples/exceptions/README.md b/examples/exceptions/README.md
index 3ef50ef189..e6c7fa7371 100644
--- a/examples/exceptions/README.md
+++ b/examples/exceptions/README.md
@@ -64,24 +64,24 @@ But if you wish to exclude all namespaces **OR** any resource with the label `"e
 
 Same works with the `posturePolicies` list ->
 
-e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
+e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
 ```
 "posturePolicies": [
 	{
 		"frameworkName": "NSA",
-		"controlName": "Allowed hostPath"
+		"controlName": "HostPath mount"
 	}
 ]
 ```
 
-But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
+But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
 ```
 "posturePolicies": [
 	{
 		"frameworkName": "NSA"
 	},
 	{
-		"controlName": "Allowed hostPath"
+		"controlName": "HostPath mount"
 	}
 ]
 ```
@@ -122,7 +122,7 @@ The resources
 	]
 ```
 
-### Exclude deployments in the default namespace that failed the "Allowed hostPath" control
+### Exclude deployments in the default namespace that failed the "HostPath mount" control
 ```
 [
 	{
@@ -142,7 +142,7 @@ The resources
 	],
 	"posturePolicies": [
 		{
-			"controlName": "Allowed hostPath"
+			"controlName": "HostPath mount"
 		}
 	]
 }
diff --git a/examples/exceptions/exclude-allowed-hostPath-control.json b/examples/exceptions/exclude-allowed-hostPath-control.json
index 106dfaa6bb..bf8db895d3 100644
--- a/examples/exceptions/exclude-allowed-hostPath-control.json
+++ b/examples/exceptions/exclude-allowed-hostPath-control.json
@@ -15,7 +15,7 @@
 	],
 	"posturePolicies": [
 		{
-			"controlName": "Allowed hostPath"
+			"controlName": "HostPath mount"
 		}
 	]
 }
diff --git a/examples/exceptions/exclude-deployments-in-ns-default.json b/examples/exceptions/exclude-deployments-in-ns-default.json
index dceb40105a..a659544646 100644
--- a/examples/exceptions/exclude-deployments-in-ns-default.json
+++ b/examples/exceptions/exclude-deployments-in-ns-default.json
@@ -16,7 +16,7 @@
 	],
 	"posturePolicies": [
 		{
-			"controlName": "Allowed hostPath"
+			"controlName": "HostPath mount"
 		}
 	]
 }
diff --git a/examples/output_mocks/prometheus-verbose-flag.txt b/examples/output_mocks/prometheus-verbose-flag.txt
index b798d4ec6e..f85f796c43 100644
--- a/examples/output_mocks/prometheus-verbose-flag.txt
+++ b/examples/output_mocks/prometheus-verbose-flag.txt
@@ -98,56 +98,56 @@ kubescape_object_failed_count{framework="NSA",control="Privileged container",nam
 kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
 # Failed object from "NSA" control "Privileged container"
 kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
-# Number of resources found as part of NSA control Allowed hostPath
-kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
-# Number of resources excluded as part of NSA control Allowed hostPath
-kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
-# Number of resources failed as part of NSA control Allowed hostPath
-kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Number of resources found as part of NSA control HostPath mount
+kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
+# Number of resources excluded as part of NSA control HostPath mount
+kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
+# Number of resources failed as part of NSA control HostPath mount
+kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
 # Number of resources found as part of NSA control Automatic mapping of service account
 kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
 # Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -2668,56 +2668,56 @@ kubescape_object_failed_count{framework="ArmoBest",control="Privileged container
 kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
 # Failed object from "ArmoBest" control "Privileged container"
 kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
-# Number of resources found as part of ArmoBest control Allowed hostPath
-kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
-# Number of resources excluded as part of ArmoBest control Allowed hostPath
-kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
-# Number of resources failed as part of ArmoBest control Allowed hostPath
-kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Number of resources found as part of ArmoBest control HostPath mount
+kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
+# Number of resources excluded as part of ArmoBest control HostPath mount
+kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
+# Number of resources failed as part of ArmoBest control HostPath mount
+kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
 # Number of resources found as part of ArmoBest control Automatic mapping of service account
 kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
 # Number of resources excluded as part of ArmoBest control Automatic mapping of service account
diff --git a/examples/output_mocks/prometheus.txt b/examples/output_mocks/prometheus.txt
index 51822bf846..333a1791e3 100644
--- a/examples/output_mocks/prometheus.txt
+++ b/examples/output_mocks/prometheus.txt
@@ -54,26 +54,26 @@ kubescape_resources_excluded_count{framework="NSA",control="Privileged container
 kubescape_resources_failed_count{framework="NSA",control="Privileged container"} 1
 # Failed object from "NSA" control "Privileged container"
 kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
-# Number of resources found as part of NSA control Allowed hostPath
-kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
-# Number of resources excluded as part of NSA control Allowed hostPath
-kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
-# Number of resources failed as part of NSA control Allowed hostPath
-kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "NSA" control "Allowed hostPath"
-kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
+# Number of resources found as part of NSA control HostPath mount
+kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
+# Number of resources excluded as part of NSA control HostPath mount
+kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
+# Number of resources failed as part of NSA control HostPath mount
+kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "NSA" control "HostPath mount"
+kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
 # Number of resources found as part of NSA control Automatic mapping of service account
 kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
 # Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -872,26 +872,26 @@ kubescape_resources_excluded_count{framework="ArmoBest",control="Privileged cont
 kubescape_resources_failed_count{framework="ArmoBest",control="Privileged container"} 1
 # Failed object from "ArmoBest" control "Privileged container"
 kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
-# Number of resources found as part of ArmoBest control Allowed hostPath
-kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
-# Number of resources excluded as part of ArmoBest control Allowed hostPath
-kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
-# Number of resources failed as part of ArmoBest control Allowed hostPath
-kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
-# Failed object from "ArmoBest" control "Allowed hostPath"
-kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
+# Number of resources found as part of ArmoBest control HostPath mount
+kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
+# Number of resources excluded as part of ArmoBest control HostPath mount
+kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
+# Number of resources failed as part of ArmoBest control HostPath mount
+kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
+# Failed object from "ArmoBest" control "HostPath mount"
+kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
 # Number of resources found as part of ArmoBest control Automatic mapping of service account
 kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
 # Number of resources excluded as part of ArmoBest control Automatic mapping of service account
diff --git a/smoke_testing/test_scan.py b/smoke_testing/test_scan.py
index 0a48663a9c..f890d8f821 100644
--- a/smoke_testing/test_scan.py
+++ b/smoke_testing/test_scan.py
@@ -13,15 +13,15 @@ def scan_all(kubescape_exec: str):


 def scan_control_name(kubescape_exec: str):
-    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath', all_files, "--enable-host-scan=false"])
+    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount', all_files, "--enable-host-scan=false"])


 def scan_control_id(kubescape_exec: str):
-    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0006', all_files, "--enable-host-scan=false"])
+    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0048', all_files, "--enable-host-scan=false"])


 def scan_controls(kubescape_exec: str):
-    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath,Allow privilege escalation', all_files, "--enable-host-scan=false"])
+    return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount,Allow privilege escalation', all_files, "--enable-host-scan=false"])


 def scan_framework(kubescape_exec: str):