k8s templates 🚀

patch-4
Prince Chaddha 2024-05-26 08:52:23 +04:00
parent 9afec19a5e
commit aedfb3905a
18 changed files with 850 additions and 0 deletions

@@ -0,0 +1,49 @@
id: k8s-cpu-limits-not-set
info:
name: CPU limits not set in Deployments
author: princechaddha
severity: medium
description: Checks for missing CPU limits in Kubernetes Deployments, which can lead to excessive CPU usage and affect other applications
impact: |
Missing CPU limits in Kubernetes Deployments can cause excessive CPU usage that can starve other applications, leading to performance degradation across the cluster.
remediation: |
Set CPU limits for all containers in Kubernetes Deployments to ensure fair CPU resource distribution and prevent performance issues.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.resources && container.resources.limits && container.resources.limits.cpu)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks CPU limits.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 490a0046304402205d1ddc90198898e6c233e5aac1742de397db7c2a70008e5fbc0676562a86894d0220088faf06d908f7d7dd8c9d0de8a20a120ed129962c78ce589abff2bc9402014a:366f2a24c8eb519f6968bd8801c08ebe
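
A pod template that satisfies this check sets a CPU limit on every container. A minimal sketch follows; the container name, image, and limit value are illustrative placeholders:

# example Deployment fragment (not part of the template above), under spec.template.spec
containers:
  - name: app
    image: registry.example.com/app:1.0.0
    resources:
      limits:
        cpu: "500m"   # hard ceiling on CPU for this container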

@@ -0,0 +1,49 @@
id: k8s-cpu-requests-not-set
info:
name: CPU Requests not set in Deployments
author: princechaddha
severity: medium
description: Checks for missing CPU requests in Kubernetes Deployments, which can lead to inadequate scheduling and resource allocation.
impact: |
Missing CPU requests in Kubernetes Deployments can cause poor scheduling decisions and suboptimal resource allocation, potentially leading to degraded application performance.
remediation: |
Set CPU requests for all containers in Kubernetes Deployments to ensure efficient scheduling and resource allocation.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.resources && container.resources.requests && container.resources.requests.cpu)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks CPU requests.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a0047304502205bb61869d917787455c6863fe6115682cf34d1267258db863a2e6380ccc9049d022100b783912d411495b9b93ed83dbd6318049f59fc40574db2c881e9b82e95566e4d:366f2a24c8eb519f6968bd8801c08ebe
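
Similarly, declaring a CPU request on each container lets the scheduler reserve capacity for it; a minimal sketch (value illustrative):

# example container fragment (not part of the template above)
resources:
  requests:
    cpu: "250m"   # amount the scheduler reserves for this container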

@@ -0,0 +1,48 @@
id: k8s-default-namespace-used
info:
name: Default Namespace Usage in Deployments
author: princechaddha
severity: high
description: Checks if Kubernetes Deployments are using the default namespace, which can lead to security risks and mismanagement issues.
impact: |
Using the default namespace for Kubernetes Deployments can increase security risks as it might allow broader access than necessary. It also complicates resource management across multiple teams and applications.
remediation: |
Avoid using the default namespace for Kubernetes Deployments. Create and specify dedicated namespaces tailored to specific applications or teams to enhance security and manage resources effectively.
reference:
- https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
tags: cloud,devops,kubernetes,k8s,devsecops,namespaces
flow: |
code(1);
for (let deployment of template.items) {
set("deployment", deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (deployment.metadata.namespace.toLowerCase() === "default") {
let result = (`Deployment '${deployment.metadata.name}' is using the default namespace, which is not recommended.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a00473045022100f48af70d2fd53e58592c7c951684543c97128981f343667899279760d37658fb022017c6dc9cbae1b0be18b1e11214bc5e1f51d909339c63b354a8d56a62f496ab79:366f2a24c8eb519f6968bd8801c08ebe
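
A dedicated namespace can be created once (for example with `kubectl create namespace team-a`) and then referenced in the Deployment metadata; the names below are illustrative:

# example Deployment metadata (not part of the template above)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app
  namespace: team-a   # anything other than 'default'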

@@ -0,0 +1,47 @@
id: k8s-host-network-namespace-shared
info:
name: Host Network Namespace Sharing
author: princechaddha
severity: high
description: Checks if containers in Kubernetes Deployments are configured to share the host's network namespace, which can lead to security risks.
impact: |
Sharing the host's network namespace allows containers to access the host network directly. This can lead to potential security breaches as containers might bypass network policies and gain unrestricted network access on the host.
remediation: |
Ensure that the 'hostNetwork' field is set to false in all Kubernetes Deployments to prevent containers from sharing the host's network namespace.
reference:
- https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces
tags: cloud,devops,kubernetes,k8s,devsecops,namespace
flow: |
code(1);
for (let deployment of template.items) {
set("deployment", deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (deployment.spec.template.spec.hostNetwork) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' is configured to share the host's network namespace.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4b0a00483046022100cd66750a4f7a5b20df75d62c2f9321edc46cac18cfd5599f45adc232248d3653022100dff978e8738723e829177c98c67e250a3fa1f9f5b8fa36db590af252c936ef73:366f2a24c8eb519f6968bd8801c08ebe
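
A pod template that keeps its own network namespace leaves hostNetwork unset or sets it to false explicitly:

# example fragment (not part of the template above), under spec.template.spec
hostNetwork: false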

@@ -0,0 +1,48 @@
id: k8s-image-pull-policy-always
info:
name: Image Pull Policy set to Always
author: princechaddha
severity: low
description: Ensures that Kubernetes deployments have the image pull policy set to 'Always', which guarantees the most up-to-date version of the image is used.
impact: |
Not setting the image pull policy to 'Always' may cause pods to use outdated versions of images, which can lead to security vulnerabilities if the images contain fixes or updates.
remediation: Update the image pull policy in Kubernetes Deployments to 'Always' to ensure that the latest container images are always used.
reference:
- https://kubernetes.io/docs/concepts/containers/images/#updating-images
tags: cloud,devops,kubernetes,k8s,devsecops,deployments,images,docker
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.every(container => container.imagePullPolicy === 'Always')) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' does not have image pull policy set to Always.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a004730450220107f47e2cbc8e9728548ec55c0d38b8a766eee58f47d2ef94709c87fb4080509022100b074dd509ae27478b66fa9d6157e952ce3f1764f66d70df3ad45afff361bf0cb:366f2a24c8eb519f6968bd8801c08ebe
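
A container spec that passes this check declares the pull policy explicitly; the name and image below are illustrative:

# example container fragment (not part of the template above)
containers:
  - name: app
    image: registry.example.com/app:1.0.0
    imagePullPolicy: Always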

@@ -0,0 +1,52 @@
id: k8s-image-tag-not-fixed
info:
name: Image Tag should be fixed - not latest or blank
author: princechaddha
severity: low
description: Checks if Kubernetes Deployment container images are using tags other than 'latest' or blank, which can lead to unstable and unpredictable deployments.
impact: |
Using 'latest' or blank image tags can result in deploying non-reproducible container images, potentially leading to unexpected application behavior and difficulties in troubleshooting.
remediation: |
Use specific image tags for all containers in Kubernetes Deployments to ensure reproducibility and stability of application deployments.
reference:
- https://kubernetes.io/docs/concepts/containers/images/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
deployment.spec.template.spec.containers.forEach(container => {
const tag = container.image.split(':').pop();
if (tag === 'latest' || tag === '') {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' uses 'latest' or blank image tag for container '${container.name}'.`);
Export(result);
}
});
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a00473045022100bd8016086ff77ceec6ef9605c3ceccd549832cca05a8682c4f3700baa158d0c7022042bb21d930467ea83c19d742cdb8a4c271c91267eb97375ac82cf945df9c27f6:366f2a24c8eb519f6968bd8801c08ebe
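
Pinning a specific tag (or an immutable digest) keeps deployments reproducible; the image and tag below are illustrative:

# example container fragment (not part of the template above)
containers:
  - name: app
    image: nginx:1.25.3   # pinned tag instead of 'nginx:latest' or a bare 'nginx'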

@@ -0,0 +1,47 @@
id: k8s-liveness-probe-not-configured
info:
name: Liveness Probe Not Configured in Deployments
author: princechaddha
severity: medium
description: Checks for missing liveness probes in Kubernetes Deployments, which are essential for managing container health and automatic recovery
impact: |
Absence of liveness probes can lead to unresponsive containers remaining in service, potentially degrading application performance and availability.
remediation: Configure liveness probes for all containers in Kubernetes Deployments to ensure proper health checks and automatic restarts of failing containers
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.livenessProbe)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks a configured liveness probe.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4b0a00483046022100e60638756964bc77d262b7457ee93f07c60fcfdfb8b8c9713be764334cba3d8e02210098435e5cb2898c8f1bbf07c33827dc5bde729593d17cbaa029e7acb9e7d55bda:366f2a24c8eb519f6968bd8801c08ebe
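
For an HTTP service, a liveness probe that would satisfy this check might look like the following; the path, port, and timings are illustrative:

# example container fragment (not part of the template above)
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 20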

@@ -0,0 +1,47 @@
id: k8s-memory-limits-not-set
info:
name: Memory limits not set in Deployments
author: princechaddha
severity: medium
description: Checks for missing memory limits in Kubernetes Deployments, which can lead to resource contention and instability
impact: |
Missing memory limits in Kubernetes Deployments can cause resource contention and potential application instability.
remediation: Set memory limits for all containers in Kubernetes Deployments to ensure resource management and application stability
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.resources && container.resources.limits && container.resources.limits.memory)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks memory limits.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 490a0046304402205488e0d1d8975da93c8879b2f4a090004db84f9a75618b7454a62edc8b0b6c2202207555edc45bffad32c1d89b0e68ebbd836a588de88a0e2f71f201fbda052d023e:366f2a24c8eb519f6968bd8801c08ebe
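
Setting a memory limit on each container caps its usage; a minimal sketch (value illustrative):

# example container fragment (not part of the template above)
resources:
  limits:
    memory: "256Mi"   # the container is OOM-killed if it exceeds this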

@@ -0,0 +1,47 @@
id: k8s-memory-requests-not-set
info:
name: Memory requests not set in Deployments
author: princechaddha
severity: medium
description: Checks for missing memory requests in Kubernetes Deployments, which can lead to inefficient scheduling and potential node resource exhaustion.
impact: |
Missing memory requests in Kubernetes Deployments can lead to inefficient pod scheduling, causing potential resource exhaustion on nodes.
remediation: Set memory requests for all containers in Kubernetes Deployments to ensure efficient pod scheduling and node resource utilization.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.resources && container.resources.requests && container.resources.requests.memory)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks memory requests.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4b0a00483046022100fe240dbf4c3fb7cc25858eae6a77ff3f3af8cce2dd2386358374f3bb179d1c2502210088357bf87098f8125c3a6038dde87ba3ac750d253e525b786f11013219119a7b:366f2a24c8eb519f6968bd8801c08ebe
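
Declaring a memory request per container guides scheduling decisions; a minimal sketch (value illustrative):

# example container fragment (not part of the template above)
resources:
  requests:
    memory: "128Mi"   # amount reserved for this container on the node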

@@ -0,0 +1,51 @@
id: minimize-added-capabilities
info:
name: Minimize container added capabilities
author: princechaddha
severity: high
description: Checks for containers in Kubernetes Deployments with added capabilities beyond the default set, increasing security risks.
impact: |
Containers with additional capabilities are granted more privileges than necessary, potentially allowing them to bypass intended security restrictions. This increases the risk of exploitation and unauthorized access.
remediation: |
Ensure that no unnecessary capabilities are added to containers within Kubernetes Deployments. Use security contexts to define the minimum necessary privileges.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment", deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
for (const container of deployment.spec.template.spec.containers) {
if (container.securityContext && container.securityContext.capabilities && container.securityContext.capabilities.add && container.securityContext.capabilities.add.length > 0) {
let addedCaps = container.securityContext.capabilities.add.join(', ');
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' has added capabilities: ${addedCaps}.`);
Export(result);
}
}
extractors:
- type: dsl
dsl:
- response
# digest: 4b0a00483046022100a4752be32718d5e3bf67d19c2c12b73dd2ddd70a44dc46ce8684cc5a70231e26022100f5f8e9f911051eeb8a41db141b0b2511c798e6d9be50aeef740011123d239ce6:366f2a24c8eb519f6968bd8801c08ebe
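
A container that keeps the default capability set, or drops everything it does not need, can be declared like this:

# example container fragment (not part of the template above)
securityContext:
  capabilities:
    drop:
      - ALL   # add back only the specific capabilities the workload truly requires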

@@ -0,0 +1,48 @@
id: k8s-privileged-containers
info:
name: Privileged Containers Found in Deployments
author: princechaddha
severity: critical
description: Checks for containers running in privileged mode within Kubernetes Deployments, which can pose significant security risks.
impact: |
Running containers in privileged mode grants them access to host resources and could lead to security breaches if the container is compromised.
remediation: |
Ensure that no container in Kubernetes Deployments runs in privileged mode. Modify the security context for each container to set `privileged: false`.
reference:
- https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment", deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (deployment.spec.template.spec.containers.some(container => container.securityContext && container.securityContext.privileged)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' is running one or more containers in privileged mode.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a00473045022100f896dc71c39119ecfd505a21a22dfcec6e06896f4d4b528cec13cf998181dd3902203458b94c2d0bf33603b64f5aa503123505a983941b01b9d0aada0b8985666044:366f2a24c8eb519f6968bd8801c08ebe
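
The corresponding fix is an explicit non-privileged security context on every container:

# example container fragment (not part of the template above)
securityContext:
  privileged: false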

@@ -0,0 +1,48 @@
id: k8s-readiness-probe-not-set
info:
name: Readiness Probes not set in Deployments
author: princechaddha
severity: medium
description: Checks for missing readiness probes in Kubernetes Deployments, which can lead to traffic being sent to unready containers
impact: |
Not configuring readiness probes in Kubernetes Deployments can result in the routing of traffic to containers that are not ready to handle requests, leading to potential downtime or degraded performance.
remediation: |
Define readiness probes in all containers within your Kubernetes Deployments to ensure that traffic is only routed to containers that are fully prepared to handle it.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
tags: cloud,devops,kubernetes,k8s,devsecops,deployments
flow: |
code(1);
for (let deployment of template.items) {
set("deployment",deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
if (!deployment.spec.template.spec.containers.some(container => container.readinessProbe)) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks readiness probes.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a004730450220592b68587cbae5ddf8d0c03554b81c57008a3878d45b2e878666aae7661e886d022100a8bffd45d8dcf071df20071daf0542bf6bbc9424e86c2aa38c522d7b10c6be3b:366f2a24c8eb519f6968bd8801c08ebe
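
A readiness probe for an HTTP service might look like the following; the path, port, and timings are illustrative:

# example container fragment (not part of the template above)
readinessProbe:
  httpGet:
    path: /ready
    port: 8080
  initialDelaySeconds: 5
  periodSeconds: 10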

@@ -0,0 +1,48 @@
id: k8s-root-container-admission
info:
name: Minimize the admission of root containers
author: princechaddha
severity: critical
description: Checks if any Kubernetes Deployments admit containers that run as root, which can pose a significant security risk.
impact: |
Allowing containers to run as root can lead to privilege escalation and unauthorized access to host resources, significantly compromising the security of the cluster.
remediation: |
Configure security contexts for all pods to run containers with a non-root user. Use Pod Security Policies or OPA/Gatekeeper to enforce these configurations.
reference:
- https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups
tags: cloud,devops,kubernetes,devsecops,deployments,k8s
flow: |
code(1);
for (let deployment of template.items) {
set("deployment", deployment)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get deployments --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
deployment = JSON.parse(template.deployment);
// flag containers that may run as root: neither runAsNonRoot nor a non-zero runAsUser is set in the container security context
if (deployment.spec.template.spec.containers.some(container => {
  const sc = container.securityContext || {};
  return sc.runAsNonRoot !== true && (sc.runAsUser == null || sc.runAsUser === 0);
})) {
let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' permits containers to run as root.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a0047304502200475eac5bae9aa7fe9c98e0a90aa3680da94db7a15515256680d16a9fa319f8d022100b70835b10aa49e8a2961b2d957e41cc8e37f64cd066f36e292eb681900c19133:366f2a24c8eb519f6968bd8801c08ebe
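
A security context that prevents root execution could look like this; the UID is an illustrative non-zero value:

# example container fragment (not part of the template above)
securityContext:
  runAsNonRoot: true
  runAsUser: 1000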

@@ -0,0 +1,31 @@
id: kubernetes-code-env
info:
name: Kubernetes Cluster Validation
author: princechaddha
severity: info
description: |
Checks whether the Kubernetes CLI (kubectl) is set up and all necessary tools are installed in the environment.
reference:
- https://kubernetes.io/
tags: cloud,devops,kubernetes,k8s,kubernetes-cloud-config
self-contained: true
code:
- engine:
- sh
- bash
source: |
kubectl version
extractors:
- type: regex
internal: true
name: server_version
group: 1
regex:
- 'Server Version:\s*(v[0-9]+\.[0-9]+\.[0-9]+)'
- type: dsl
dsl:
- '"kubectl is successfully connected to the Kubernetes API Server Version: " + server_version'
# digest: 4a0a004730450220619359f90501cb0301a0a76aea235c33bf0d89d0295b92f754fedbadae743cad02210081662171da38b9b6cbeee60e25edb542f288457118bde3833e2ea4e784333f18:366f2a24c8eb519f6968bd8801c08ebe

@@ -0,0 +1,50 @@
id: k8s-allow-privilege-escalation-set
info:
name: Containers run with allowPrivilegeEscalation enabled
author: princechaddha
severity: critical
description: Checks for containers running with the allowPrivilegeEscalation flag enabled, which can increase security risks by allowing privileges to be escalated
impact: |
Enabling allowPrivilegeEscalation in container deployments can result in elevated privileges, potentially allowing attackers to gain further access to host resources. This poses significant security risks.
remediation: Ensure that the allowPrivilegeEscalation flag is set to false in all container configurations to minimize security risks
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
tags: cloud,devops,kubernetes,security,devsecops,containers
flow: |
code(1);
for (let container of template.items) {
set("container", container)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get pods --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[] | {pod: .metadata.name, containers: .spec.containers}'
javascript:
- code: |
let podData = JSON.parse(template.container); // container is now a JSON object with 'pod' and 'containers'
podData.containers.forEach(container => {
if (container.securityContext && container.securityContext.allowPrivilegeEscalation === true) {
let result = (`Container '${container.name}' in pod '${podData.pod}' running with allowPrivilegeEscalation enabled.`);
Export(result);
}
});
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a00473045022025dc0e9a5d93ac04821b70467991a867158ff37e94f154ac26b67c9ebfaf6bdd02210094dde6b68d08983ae386767c31909fb6aa2a8edc5f257b81309eda1d5b0b0bb9:366f2a24c8eb519f6968bd8801c08ebe
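
The fix is to disable privilege escalation in each container's security context:

# example container fragment (not part of the template above)
securityContext:
  allowPrivilegeEscalation: false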

@@ -0,0 +1,47 @@
id: k8s-containers-share-host-ipc
info:
name: Containers sharing host IPC namespace
author: princechaddha
severity: critical
description: Checks if any containers in Kubernetes Pods are configured to share the host's IPC namespace, which can lead to security risks.
impact: |
Sharing the host's IPC namespace allows containers to access data across all containers on the same host, posing potential security risks.
remediation: Ensure that no container in Kubernetes Pods is set to share the host IPC namespace. Configure 'spec.hostIPC' to 'false' for all pods to isolate IPC namespaces.
reference:
- https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
tags: cloud,devops,kubernetes,k8s,devsecops,pods
flow: |
code(1);
for (let pod of template.items) {
set("pod",pod)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get pods --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
pod = JSON.parse(template.pod);
if (pod.spec.hostIPC) {
let result = (`Pod '${pod.metadata.name}' in namespace '${pod.metadata.namespace}' is configured to share the host IPC namespace.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4a0a00473045022100a9730fb29a79c4eb4576ddb1000e7a07cce3aa571fc20f8985265f114f5072ad02207a866c1c532b29932da349c0e29fb008675438fb327c0dab1620893ccf009ff7:366f2a24c8eb519f6968bd8801c08ebe
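
Keeping IPC isolated means leaving hostIPC unset or setting it to false at the pod level:

# example fragment (not part of the template above), under the pod spec
hostIPC: false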

@@ -0,0 +1,47 @@
id: k8s-host-pid-namespace-sharing
info:
name: Host PID Namespace Sharing
author: princechaddha
severity: critical
description: Checks if containers in Kubernetes pods share the host's process ID namespace, which can pose a security risk.
impact: |
Sharing the host's PID namespace allows processes within the pod to view all of the processes on the host, potentially leading to privilege escalation and other security vulnerabilities.
remediation: |
Ensure that the 'hostPID' field is set to 'false' in Kubernetes Pod specifications to prevent containers from sharing the host's PID namespace.
reference:
- https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces
tags: cloud,devops,kubernetes,k8s,devsecops,pods
flow: |
code(1);
for (let pod of template.items) {
set("pod", pod)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get pods --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[]'
javascript:
- code: |
pod = JSON.parse(template.pod);
if (pod.spec.hostPID) {
let result = (`Pod '${pod.metadata.name}' in namespace '${pod.metadata.namespace}' is sharing the host's PID namespace.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
# digest: 4b0a00483046022100cdb3127b175107266c1409c70c3318a851490bf743f92923077b0249b9090cdd022100d3371724828565a28e7003ecf9902e3deb93c074444b7a9b2d34c235f454032c:366f2a24c8eb519f6968bd8801c08ebe
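
Likewise, PID namespace isolation is preserved by leaving hostPID unset or explicitly false:

# example fragment (not part of the template above), under the pod spec
hostPID: false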

@@ -0,0 +1,46 @@
id: k8s-readonly-fs
info:
name: Enforce Read-Only Filesystem for Containers
author: princechaddha
severity: critical
description: Checks for containers that do not use a read-only filesystem, which can prevent malicious write operations at runtime
impact: |
Not using a read-only filesystem can expose containers to risks of malicious modifications at runtime, compromising the container's integrity and security.
remediation: Configure containers to use read-only filesystems where possible to enhance security and minimize risk of unauthorized data modification
reference:
- https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
tags: cloud,devops,kubernetes,k8s,devsecops,pods
flow: |
code(1);
for (let container of template.items) {
set("container", container)
javascript(1);
}
self-contained: true
code:
- engine:
- sh
- bash
source: kubectl get pods --all-namespaces --output=json
extractors:
- type: json
name: items
internal: true
json:
- '.items[].spec.containers[]'
javascript:
- code: |
container = JSON.parse(template.container);
if (!container.securityContext || container.securityContext.readOnlyRootFilesystem !== true) {
// the extracted object is a bare container spec, so pod name and namespace are not available here
let result = (`Container '${container.name}' does not use a read-only filesystem.`);
Export(result);
}
extractors:
- type: dsl
dsl:
- response
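
A container that passes this check mounts its root filesystem read-only; writable paths, if needed, can still be provided through emptyDir volumes:

# example container fragment (not part of the template above)
securityContext:
  readOnlyRootFilesystem: true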